update
diff --git a/charts/.gitignore b/charts/.gitignore
new file mode 100644
index 0000000..79a1f54
--- /dev/null
+++ b/charts/.gitignore
@@ -0,0 +1,2 @@
+!*
+.DS_Store
\ No newline at end of file
diff --git a/charts/app-repository/.helmignore b/charts/app-repository/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/app-repository/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/app-repository/Chart.yaml b/charts/app-repository/Chart.yaml
new file mode 100644
index 0000000..63ac303
--- /dev/null
+++ b/charts/app-repository/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: app-repository
+description: A Helm chart for PCloud App Repository
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/app-repository/templates/install.yaml b/charts/app-repository/templates/install.yaml
new file mode 100644
index 0000000..9d592bb
--- /dev/null
+++ b/charts/app-repository/templates/install.yaml
@@ -0,0 +1,73 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: app-repository
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: app-repository
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.certificateIssuer }}
+ annotations:
+ acme.cert-manager.io/http01-edit-in-place: "true"
+ cert-manager.io/cluster-issuer: {{ .Values.certificateIssuer }}
+ {{- end }}
+spec:
+ ingressClassName: {{ .Values.ingressClassName }}
+ {{- if .Values.certificateIssuer }}
+ tls:
+ - hosts:
+ - {{ .Values.domain }}
+ secretName: cert-app-repository
+ {{- end }}
+ rules:
+ - host: {{ .Values.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: app-repository
+ port:
+ name: http
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: app-repository
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: app-repository
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: app-repository
+ spec:
+ containers:
+ - name: app-repository
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - app-repository
+ - --port=8080
+ - --apps-dir={{ .Values.appsDir }}
+ - --scheme-with-host=https://{{ .Values.domain }}
diff --git a/charts/app-repository/values.yaml b/charts/app-repository/values.yaml
new file mode 100644
index 0000000..1439898
--- /dev/null
+++ b/charts/app-repository/values.yaml
@@ -0,0 +1,8 @@
+image:
+ repository: giolekva/app-repository
+ tag: latest
+ pullPolicy: Always
+domain: example.com
+appsDir: /pcloud/apps
+certificateIssuer: ""
+ingressClassName: ""
diff --git a/charts/app-runner/.helmignore b/charts/app-runner/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/app-runner/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/app-runner/Chart.yaml b/charts/app-runner/Chart.yaml
new file mode 100644
index 0000000..b1bad90
--- /dev/null
+++ b/charts/app-runner/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: app-runner
+description: A Helm chart for PCloud App Runner
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/app-runner/templates/install.yaml b/charts/app-runner/templates/install.yaml
new file mode 100644
index 0000000..f89cc79
--- /dev/null
+++ b/charts/app-runner/templates/install.yaml
@@ -0,0 +1,112 @@
+{{ $runCfg := .Values.runCfg | b64dec }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: app-ssh-key
+type: Opaque
+data:
+ private: {{ .Values.sshPrivateKey }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: app-run-cfg
+data:
+ run: |
+{{ indent 4 $runCfg }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: app-app
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: app-app
+ ports:
+ - name: app
+ port: 80
+ targetPort: app
+ protocol: TCP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: app-api
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: app-app
+ ports:
+ - name: api
+ port: 3000
+ targetPort: api
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: app-app
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: app-app
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: app-app
+ spec:
+ runtimeClassName: {{ .Values.runtimeClassName }}
+ volumes:
+ - name: ssh-key
+ secret:
+ secretName: app-ssh-key
+ - name: run-cfg
+ configMap:
+ name: app-run-cfg
+ {{- range .Values.volumes }}
+ - name: volume-{{ .name }}
+ persistentVolumeClaim:
+ claimName: {{ .name }}
+ {{- end }}
+ containers:
+ - name: app
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: api
+ containerPort: 3000
+ protocol: TCP
+ - name: app
+ containerPort: {{ .Values.appPort }}
+ protocol: TCP
+ env:
+ - name: SELF_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ command:
+ - app-runner
+ - --port=3000
+ - --app-id={{ .Values.appId }}
+ - --app-dir={{ .Values.appDir }}
+ - --repo-addr={{ .Values.repoAddr }}
+ - --ssh-key=/pcloud/ssh-key/private
+ - --run-cfg=/pcloud/config/run
+ - --manager-addr={{ .Values.managerAddr }}
+ volumeMounts:
+ - name: ssh-key
+ readOnly: true
+ mountPath: /pcloud/ssh-key
+ - name: run-cfg
+ readOnly: true
+ mountPath: /pcloud/config
+ {{- range .Values.volumes }}
+ - name: volume-{{ .name }}
+ mountPath: {{ .mountPath }}
+ {{- end }}
diff --git a/charts/app-runner/values.yaml b/charts/app-runner/values.yaml
new file mode 100644
index 0000000..afc9481
--- /dev/null
+++ b/charts/app-runner/values.yaml
@@ -0,0 +1,13 @@
+image:
+ repository: giolekva/app-runner
+ tag: latest
+ pullPolicy: Always
+repoAddr: 192.168.0.11
+sshPrivateKey: key
+appId: ""
+runCfg: ""
+appDir: /dodo-app
+appPort: 8080
+managerAddr: ""
+volumes: []
+runtimeClassName: ""
diff --git a/charts/appmanager/.helmignore b/charts/appmanager/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/appmanager/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/appmanager/Chart.yaml b/charts/appmanager/Chart.yaml
new file mode 100644
index 0000000..a37113d
--- /dev/null
+++ b/charts/appmanager/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: appmanager
+description: A Helm chart for PCloud App Manager
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/appmanager/templates/install.yaml b/charts/appmanager/templates/install.yaml
new file mode 100644
index 0000000..96d6b08
--- /dev/null
+++ b/charts/appmanager/templates/install.yaml
@@ -0,0 +1,100 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Values.clusterRoleName }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - create
+- apiGroups:
+ - "batch"
+ resources:
+ - jobs
+ verbs:
+ - create
+- apiGroups:
+ - "helm.toolkit.fluxcd.io"
+ resources:
+ - helmreleases
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Values.clusterRoleName }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Values.clusterRoleName }}
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ssh-key
+type: Opaque
+data:
+ private: {{ .Values.sshPrivateKey }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: appmanager
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: appmanager
+ ports:
+ - name: {{ .Values.portName }}
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: appmanager
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: appmanager
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: appmanager
+ spec:
+ volumes:
+ - name: ssh-key
+ secret:
+ secretName: ssh-key
+ containers:
+ - name: appmanager
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - pcloud-installer
+ - appmanager
+ - --repo-addr={{ .Values.repoAddr }}
+ - --ssh-key=/pcloud/ssh-key/private
+ - --port=8080
+ {{- if .Values.appRepoAddr }}
+ - --app-repo-addr={{ .Values.appRepoAddr }}
+ {{- end }}
+ volumeMounts:
+ - name: ssh-key
+ readOnly: true
+ mountPath: /pcloud/ssh-key
diff --git a/charts/appmanager/values.yaml b/charts/appmanager/values.yaml
new file mode 100644
index 0000000..b1a2a5d
--- /dev/null
+++ b/charts/appmanager/values.yaml
@@ -0,0 +1,13 @@
+image:
+ repository: giolekva/pcloud-installer
+ tag: latest
+ pullPolicy: Always
+repoAddr: 192.168.0.11
+sshPrivateKey: key
+ingress:
+ className: example-ingress-private
+ domain: apps.p.example.com
+ certificateIssuer: example-private
+clusterRoleName: example-welcome
+appRepoAddr: ""
+portName: http
diff --git a/charts/auth-proxy/.helmignore b/charts/auth-proxy/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/auth-proxy/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/auth-proxy/Chart.yaml b/charts/auth-proxy/Chart.yaml
new file mode 100644
index 0000000..1578a6c
--- /dev/null
+++ b/charts/auth-proxy/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: auth-proxy
+description: A Helm chart for pCloud auth-proxy
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/auth-proxy/templates/install.yaml b/charts/auth-proxy/templates/install.yaml
new file mode 100644
index 0000000..09a1059
--- /dev/null
+++ b/charts/auth-proxy/templates/install.yaml
@@ -0,0 +1,47 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: auth-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: auth-proxy
+ ports:
+ - name: {{ .Values.portName }}
+ port: 80
+ targetPort: {{ .Values.portName }}
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: auth-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: auth-proxy
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: auth-proxy
+ spec:
+ containers:
+ - name: auth-proxy
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: {{ .Values.portName }}
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - server
+ - --port=8080
+ - --whoami-addr={{ .Values.whoAmIAddr }}
+ - --login-addr={{ .Values.loginAddr }}
+ - --membership-addr={{ .Values.membershipAddr }}
+ - --membership-public-addr={{ .Values.membershipPublicAddr }}
+ - --groups={{ .Values.groups }}
+ - --upstream={{ .Values.upstream }}
diff --git a/charts/auth-proxy/values.yaml b/charts/auth-proxy/values.yaml
new file mode 100644
index 0000000..742f4c9
--- /dev/null
+++ b/charts/auth-proxy/values.yaml
@@ -0,0 +1,11 @@
+image:
+ repository: giolekva/auth-proxy
+ tag: latest
+ pullPolicy: Always
+upstream: bar.svc.cluster.local
+whoAmIAddr: https://accounts.example.com/sessions/whoami
+loginAddr: https://accounts-ui.example.com/login
+membershipAddr: https://memberships.p.example.com/api/user
+membershipPublicAddr: https://memberships.p.example.com
+groups: ""
+portName: http
diff --git a/charts/auth/.helmignore b/charts/auth/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/auth/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/auth/Chart.lock b/charts/auth/Chart.lock
new file mode 100644
index 0000000..c8a1eed
--- /dev/null
+++ b/charts/auth/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: kratos
+ repository: https://k8s.ory.sh/helm/charts
+ version: 0.33.1
+- name: hydra
+ repository: https://k8s.ory.sh/helm/charts
+ version: 0.33.1
+digest: sha256:10ebc5a0091aa612b31d13ad09ab3ae9593e876b8dc27edcaf8e2fd553aab832
+generated: "2023-05-30T14:29:25.471201+04:00"
diff --git a/charts/auth/Chart.yaml b/charts/auth/Chart.yaml
new file mode 100644
index 0000000..f261203
--- /dev/null
+++ b/charts/auth/Chart.yaml
@@ -0,0 +1,13 @@
+apiVersion: v2
+name: auth
+description: A Helm chart for PCloud core authentication & authorization services
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
+dependencies:
+- name: kratos
+ version: v0.33.1
+ repository: https://k8s.ory.sh/helm/charts
+- name: hydra
+ version: v0.33.1
+ repository: https://k8s.ory.sh/helm/charts
diff --git a/charts/auth/charts/hydra-0.33.1.tgz b/charts/auth/charts/hydra-0.33.1.tgz
new file mode 100644
index 0000000..80d1080
--- /dev/null
+++ b/charts/auth/charts/hydra-0.33.1.tgz
Binary files differ
diff --git a/charts/auth/charts/kratos-0.33.1.tgz b/charts/auth/charts/kratos-0.33.1.tgz
new file mode 100644
index 0000000..17bdd12
--- /dev/null
+++ b/charts/auth/charts/kratos-0.33.1.tgz
Binary files differ
diff --git a/charts/auth/templates/certificates.yaml b/charts/auth/templates/certificates.yaml
new file mode 100644
index 0000000..8a3c209
--- /dev/null
+++ b/charts/auth/templates/certificates.yaml
@@ -0,0 +1,44 @@
+# apiVersion: cert-manager.io/v1
+# kind: Certificate
+# metadata:
+# name: accounts-ui.{{ .Values.ui.domain }}
+# namespace: {{ .Release.Namespace }}
+# annotations:
+# helm.sh/resource-policy: keep
+# spec:
+# dnsNames:
+# - "accounts-ui.{{ .Values.ui.domain }}"
+# issuerRef:
+# name: {{ .Values.ui.certificateIssuer }}
+# kind: ClusterIssuer
+# secretName: cert-accounts-ui.{{ .Values.ui.domain }}
+# ---
+# apiVersion: cert-manager.io/v1
+# kind: Certificate
+# metadata:
+# name: accounts.{{ .Values.ui.domain }}
+# namespace: {{ .Release.Namespace }}
+# annotations:
+# helm.sh/resource-policy: keep
+# spec:
+# dnsNames:
+# - "accounts.{{ .Values.ui.domain }}"
+# issuerRef:
+# name: {{ .Values.ui.certificateIssuer }}
+# kind: ClusterIssuer
+# secretName: cert-accounts.{{ .Values.ui.domain }}
+# ---
+# apiVersion: cert-manager.io/v1
+# kind: Certificate
+# metadata:
+# name: hydra.{{ .Values.ui.domain }}
+# namespace: {{ .Release.Namespace }}
+# annotations:
+# helm.sh/resource-policy: keep
+# spec:
+# dnsNames:
+# - "hydra.{{ .Values.ui.domain }}"
+# issuerRef:
+# name: {{ .Values.ui.certificateIssuer }}
+# kind: ClusterIssuer
+# secretName: cert-hydra.{{ .Values.ui.domain }}
diff --git a/charts/auth/templates/ui.yaml b/charts/auth/templates/ui.yaml
new file mode 100644
index 0000000..ed8ba7b
--- /dev/null
+++ b/charts/auth/templates/ui.yaml
@@ -0,0 +1,94 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: ui
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: ui
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: api
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: ui
+ ports:
+ - name: http
+ port: 80
+ targetPort: api
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ui
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ acme.cert-manager.io/http01-edit-in-place: "true"
+ cert-manager.io/cluster-issuer: {{ .Values.ui.certificateIssuer }}
+spec:
+ ingressClassName: {{ .Values.ui.ingressClassName }}
+ tls:
+ - hosts:
+ - accounts-ui.{{ .Values.ui.domain }}
+ secretName: cert-accounts-ui.{{ .Values.ui.domain }}
+ rules:
+ - host: accounts-ui.{{ .Values.ui.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: ui
+ port:
+ name: http
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ui
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: ui
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: ui
+ spec:
+ containers:
+ - name: server
+ image: {{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag }}
+ imagePullPolicy: {{ .Values.ui.image.pullPolicy }}
+ env:
+ - name: KRATOS_PUBLIC_URL
+ value: "https://accounts.{{ .Values.ui.domain }}"
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ - name: api
+ containerPort: 8081
+ protocol: TCP
+ command:
+ - server
+ - --port=8080
+ - --kratos=https://accounts.{{ .Values.ui.domain }}
+ - --hydra={{ .Values.ui.hydra }}
+ - --email-domain={{ .Values.ui.domain }}
+ - --api-port=8081
+ - --kratos-api=http://kratos-admin.{{ .Release.Namespace }}.svc.cluster.local
+ - --enable-registration={{ .Values.ui.enableRegistration }}
diff --git a/charts/auth/values.yaml b/charts/auth/values.yaml
new file mode 100644
index 0000000..ff9e10d
--- /dev/null
+++ b/charts/auth/values.yaml
@@ -0,0 +1,3 @@
+kratos: {}
+hydra: {}
+ui: {}
diff --git a/charts/cert-manager-webhook-gandi-role/Chart.yaml b/charts/cert-manager-webhook-gandi-role/Chart.yaml
new file mode 100644
index 0000000..54e3bf9
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi-role/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: cert-manager-webhook-gandi-role
+description: A Helm chart for a cert-manager role that allows creating Gandi resources
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/cert-manager-webhook-gandi-role/templates/role.yaml b/charts/cert-manager-webhook-gandi-role/templates/role.yaml
new file mode 100644
index 0000000..e45a7ab
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi-role/templates/role.yaml
@@ -0,0 +1,24 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cert-manager-gandi
+rules:
+- apiGroups:
+ - acme.bwolf.me
+ resources:
+ - gandi
+ verbs:
+ - "*" # TODO(giolekva): limit
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cert-manager-gandi-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cert-manager-gandi
+subjects:
+- kind: ServiceAccount
+ name: {{ .Values.certManager.name }}
+ namespace: {{ .Values.certManager.namespace }}
diff --git a/charts/cert-manager-webhook-gandi-role/values.yaml b/charts/cert-manager-webhook-gandi-role/values.yaml
new file mode 100644
index 0000000..0b00b32
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi-role/values.yaml
@@ -0,0 +1,3 @@
+certManager:
+ name: pcloud-cert-manager
+ namespace: pcloud-cert-manager
diff --git a/charts/cert-manager-webhook-gandi/.helmignore b/charts/cert-manager-webhook-gandi/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/cert-manager-webhook-gandi/Chart.yaml b/charts/cert-manager-webhook-gandi/Chart.yaml
new file mode 100644
index 0000000..3c313c9
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v2
+description: A Helm chart for cert-manager-webhook-gandi
+name: cert-manager-webhook-gandi
+version: v0.2.0
diff --git a/charts/cert-manager-webhook-gandi/templates/_helpers.tpl b/charts/cert-manager-webhook-gandi/templates/_helpers.tpl
new file mode 100644
index 0000000..643eef6
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/templates/_helpers.tpl
@@ -0,0 +1,48 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cert-manager-webhook-gandi.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cert-manager-webhook-gandi.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cert-manager-webhook-gandi.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "cert-manager-webhook-gandi.selfSignedIssuer" -}}
+{{ printf "%s-selfsign" (include "cert-manager-webhook-gandi.fullname" .) }}
+{{- end -}}
+
+{{- define "cert-manager-webhook-gandi.rootCAIssuer" -}}
+{{ printf "%s-ca" (include "cert-manager-webhook-gandi.fullname" .) }}
+{{- end -}}
+
+{{- define "cert-manager-webhook-gandi.rootCACertificate" -}}
+{{ printf "%s-ca" (include "cert-manager-webhook-gandi.fullname" .) }}
+{{- end -}}
+
+{{- define "cert-manager-webhook-gandi.servingCertificate" -}}
+{{ printf "%s-webhook-tls" (include "cert-manager-webhook-gandi.fullname" .) }}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-gandi/templates/apiservice.yaml b/charts/cert-manager-webhook-gandi/templates/apiservice.yaml
new file mode 100644
index 0000000..1d28d91
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/templates/apiservice.yaml
@@ -0,0 +1,19 @@
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ name: v1alpha1.{{ .Values.groupName }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ annotations:
+ cert-manager.io/inject-ca-from: "{{ .Values.certManager.namespace }}/{{ include "cert-manager-webhook-gandi.servingCertificate" . }}"
+spec:
+ group: {{ .Values.groupName }}
+ groupPriorityMinimum: 1000
+ versionPriority: 15
+ service:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ version: v1alpha1
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-gandi/templates/deployment.yaml b/charts/cert-manager-webhook-gandi/templates/deployment.yaml
new file mode 100644
index 0000000..073a61b
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/templates/deployment.yaml
@@ -0,0 +1,72 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ serviceAccountName: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args:
+ - --tls-cert-file=/tls/tls.crt
+ - --tls-private-key-file=/tls/tls.key
+{{- if .Values.logLevel }}
+ - --v={{ .Values.logLevel }}
+{{- end }}
+ env:
+ - name: GROUP_NAME
+ value: {{ .Values.groupName | quote }}
+ ports:
+ - name: https
+ containerPort: 443
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /healthz
+ port: https
+ readinessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /healthz
+ port: https
+ volumeMounts:
+ - name: certs
+ mountPath: /tls
+ readOnly: true
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ volumes:
+ - name: certs
+ secret:
+ secretName: {{ include "cert-manager-webhook-gandi.servingCertificate" . }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-gandi/templates/pki.yaml b/charts/cert-manager-webhook-gandi/templates/pki.yaml
new file mode 100644
index 0000000..07e26d0
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/templates/pki.yaml
@@ -0,0 +1,70 @@
+---
+# Create a selfsigned Issuer, in order to create a root CA certificate for
+# signing webhook serving certificates
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.selfSignedIssuer" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ selfSigned: {}
+---
+# Generate a CA Certificate used to sign certificates for the webhook
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.rootCACertificate" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ secretName: {{ include "cert-manager-webhook-gandi.rootCACertificate" . }}
+ duration: 43800h # 5y
+ issuerRef:
+ name: {{ include "cert-manager-webhook-gandi.selfSignedIssuer" . }}
+ commonName: "ca.cert-manager-webhook-gandi.cert-manager"
+ isCA: true
+---
+# Create an Issuer that uses the above generated CA certificate to issue certs
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.rootCAIssuer" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ ca:
+ secretName: {{ include "cert-manager-webhook-gandi.rootCACertificate" . }}
+---
+# Finally, generate a serving certificate for the webhook to use
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.servingCertificate" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ secretName: {{ include "cert-manager-webhook-gandi.servingCertificate" . }}
+ duration: 8760h # 1y
+ issuerRef:
+ name: {{ include "cert-manager-webhook-gandi.rootCAIssuer" . }}
+ dnsNames:
+ - {{ include "cert-manager-webhook-gandi.fullname" . }}
+ - {{ include "cert-manager-webhook-gandi.fullname" . }}.{{ .Values.certManager.namespace }}
+ - {{ include "cert-manager-webhook-gandi.fullname" . }}.{{ .Values.certManager.namespace }}.svc
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-gandi/templates/rbac.yaml b/charts/cert-manager-webhook-gandi/templates/rbac.yaml
new file mode 100644
index 0000000..16cc149
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/templates/rbac.yaml
@@ -0,0 +1,165 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+---
+# Grant the webhook permission to read the ConfigMap containing the Kubernetes
+# apiserver's requestheader-ca-certificate
+# This ConfigMap is automatically created by the Kubernetes apiserver
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:webhook-authentication-reader
+ namespace: kube-system
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+---
+# apiserver gets the auth-delegator role to delegate auth decisions to
+# the core apiserver
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:auth-delegator
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:auth-delegator
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+---
+# Grant cert-manager permission to validate using our apiserver
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:domain-solver
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+ - apiGroups:
+ - {{ .Values.groupName }}
+ resources:
+ - "*"
+ verbs:
+ - "create"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:domain-solver
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:domain-solver
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ .Values.certManager.serviceAccountName }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:secret-reader
+ namespace: {{ .Values.certManager.namespace | quote }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - "secrets"
+ resourceNames:
+ - "gandi-credentials"
+ verbs:
+ - "get"
+ - "watch"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:secret-reader
+ namespace: {{ .Values.certManager.namespace | quote }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:secret-reader
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+{{- if .Values.features.apiPriorityAndFairness }}
+---
+# Grant cert-manager-webhook-gandi permission to read the flow control mechanism (APF)
+# API Priority and Fairness is enabled by default in Kubernetes 1.20
+# https://kubernetes.io/docs/concepts/cluster-administration/flow-control/
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:flowcontrol-solver
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+ - apiGroups:
+ - "flowcontrol.apiserver.k8s.io"
+ resources:
+ - "prioritylevelconfigurations"
+ - "flowschemas"
+ verbs:
+ - "list"
+ - "watch"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:flowcontrol-solver
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}:flowcontrol-solver
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-gandi/templates/service.yaml b/charts/cert-manager-webhook-gandi/templates/service.yaml
new file mode 100644
index 0000000..817c60c
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/templates/service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "cert-manager-webhook-gandi.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ chart: {{ include "cert-manager-webhook-gandi.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: https
+ protocol: TCP
+ name: https
+ selector:
+ app: {{ include "cert-manager-webhook-gandi.name" . }}
+ release: {{ .Release.Name }}
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-gandi/values.yaml b/charts/cert-manager-webhook-gandi/values.yaml
new file mode 100644
index 0000000..ab255cc
--- /dev/null
+++ b/charts/cert-manager-webhook-gandi/values.yaml
@@ -0,0 +1,20 @@
+groupName: acme.bwolf.me
+logLevel: 6
+certManager:
+ namespace: cert-manager
+ serviceAccountName: cert-manager
+image:
+ repository: bwolf/cert-manager-webhook-gandi
+ tag: 0.2.0
+ pullPolicy: IfNotPresent
+nameOverride: ''
+fullnameOverride: ''
+service:
+ type: ClusterIP
+ port: 443
+features:
+ apiPriorityAndFairness: false
+resources: {}
+nodeSelector: {}
+tolerations: []
+affinity: {}
diff --git a/charts/cert-manager-webhook-pcloud/.helmignore b/charts/cert-manager-webhook-pcloud/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/cert-manager-webhook-pcloud/Chart.yaml b/charts/cert-manager-webhook-pcloud/Chart.yaml
new file mode 100644
index 0000000..748ca83
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v2
+description: A Helm chart for cert-manager-webhook-pcloud
+name: cert-manager-webhook-pcloud
+version: v0.2.0
diff --git a/charts/cert-manager-webhook-pcloud/templates/_helpers.tpl b/charts/cert-manager-webhook-pcloud/templates/_helpers.tpl
new file mode 100644
index 0000000..1332d17
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/templates/_helpers.tpl
@@ -0,0 +1,48 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cert-manager-webhook-pcloud.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cert-manager-webhook-pcloud.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cert-manager-webhook-pcloud.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "cert-manager-webhook-pcloud.selfSignedIssuer" -}}
+{{ printf "%s-selfsign" (include "cert-manager-webhook-pcloud.fullname" .) }}
+{{- end -}}
+
+{{- define "cert-manager-webhook-pcloud.rootCAIssuer" -}}
+{{ printf "%s-ca" (include "cert-manager-webhook-pcloud.fullname" .) }}
+{{- end -}}
+
+{{- define "cert-manager-webhook-pcloud.rootCACertificate" -}}
+{{ printf "%s-ca" (include "cert-manager-webhook-pcloud.fullname" .) }}
+{{- end -}}
+
+{{- define "cert-manager-webhook-pcloud.servingCertificate" -}}
+{{ printf "%s-webhook-tls" (include "cert-manager-webhook-pcloud.fullname" .) }}
+{{- end -}}
diff --git a/charts/cert-manager-webhook-pcloud/templates/apiservice.yaml b/charts/cert-manager-webhook-pcloud/templates/apiservice.yaml
new file mode 100644
index 0000000..e54fd33
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/templates/apiservice.yaml
@@ -0,0 +1,19 @@
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ name: v1alpha1.{{ .Values.apiGroupName }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ annotations:
+ cert-manager.io/inject-ca-from: "{{ .Values.certManager.namespace }}/{{ include "cert-manager-webhook-pcloud.servingCertificate" . }}"
+spec:
+ group: {{ .Values.apiGroupName }}
+ groupPriorityMinimum: 1000
+ versionPriority: 15
+ service:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ version: v1alpha1
diff --git a/charts/cert-manager-webhook-pcloud/templates/deployment.yaml b/charts/cert-manager-webhook-pcloud/templates/deployment.yaml
new file mode 100644
index 0000000..a9bf3c3
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/templates/deployment.yaml
@@ -0,0 +1,75 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ serviceAccountName: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args:
+ - /dns-challenge-solver
+ - --tls-cert-file=/tls/tls.crt
+ - --tls-private-key-file=/tls/tls.key
+{{- if .Values.logLevel }}
+ - --v={{ .Values.logLevel }}
+{{- end }}
+ env:
+ - name: API_GROUP_NAME
+ value: {{ .Values.apiGroupName | quote }}
+ - name: RESOLVER_NAME
+ value: {{ .Values.resolverName | quote }}
+ ports:
+ - name: https
+ containerPort: 443
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /healthz
+ port: https
+ readinessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /healthz
+ port: https
+ volumeMounts:
+ - name: certs
+ mountPath: /tls
+ readOnly: true
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ volumes:
+ - name: certs
+ secret:
+ secretName: {{ include "cert-manager-webhook-pcloud.servingCertificate" . }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
diff --git a/charts/cert-manager-webhook-pcloud/templates/pki.yaml b/charts/cert-manager-webhook-pcloud/templates/pki.yaml
new file mode 100644
index 0000000..59caf22
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/templates/pki.yaml
@@ -0,0 +1,70 @@
+---
+# Create a selfsigned Issuer, in order to create a root CA certificate for
+# signing webhook serving certificates
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.selfSignedIssuer" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ selfSigned: {}
+---
+# Generate a CA Certificate used to sign certificates for the webhook
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.rootCACertificate" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ secretName: {{ include "cert-manager-webhook-pcloud.rootCACertificate" . }}
+ duration: 43800h # 5y
+ issuerRef:
+ name: {{ include "cert-manager-webhook-pcloud.selfSignedIssuer" . }}
+ commonName: "ca.cert-manager-webhook-pcloud.cert-manager"
+ isCA: true
+---
+# Create an Issuer that uses the above generated CA certificate to issue certs
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.rootCAIssuer" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ ca:
+ secretName: {{ include "cert-manager-webhook-pcloud.rootCACertificate" . }}
+---
+# Finally, generate a serving certificate for the webhook to use
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.servingCertificate" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ secretName: {{ include "cert-manager-webhook-pcloud.servingCertificate" . }}
+ duration: 8760h # 1y
+ issuerRef:
+ name: {{ include "cert-manager-webhook-pcloud.rootCAIssuer" . }}
+ dnsNames:
+ - {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ - {{ include "cert-manager-webhook-pcloud.fullname" . }}.{{ .Values.certManager.namespace }}
+ - {{ include "cert-manager-webhook-pcloud.fullname" . }}.{{ .Values.certManager.namespace }}.svc
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-pcloud/templates/rbac.yaml b/charts/cert-manager-webhook-pcloud/templates/rbac.yaml
new file mode 100644
index 0000000..acd44c1
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/templates/rbac.yaml
@@ -0,0 +1,193 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+---
+# Grant the webhook permission to read the ConfigMap containing the Kubernetes
+# apiserver's requestheader-ca-certificate
+# This ConfigMap is automatically created by the Kubernetes apiserver
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:webhook-authentication-reader
+ namespace: kube-system
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+---
+# apiserver gets the auth-delegator role to delegate auth decisions to
+# the core apiserver
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:auth-delegator
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:auth-delegator
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+---
+# Grant cert-manager permission to validate using our apiserver
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:domain-solver
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+ - apiGroups:
+ - {{ .Values.apiGroupName }}
+ resources:
+ - "*"
+ verbs:
+ - "create"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:domain-solver
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:domain-solver
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ .Values.certManager.serviceAccountName }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:secret-reader
+ namespace: {{ .Values.certManager.namespace | quote }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - "secrets"
+ resourceNames:
+ - "pcloud-credentials"
+ verbs:
+ - "get"
+ - "watch"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:secret-reader
+ namespace: {{ .Values.certManager.namespace | quote }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:secret-reader
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+---
+# TODO(gio): limit access by resourceNames
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:api-configmap-reader
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - "configmaps"
+ verbs:
+ - "get"
+ - "watch"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:api-configmap-reader
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:api-configmap-reader
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+{{- if .Values.features.apiPriorityAndFairness }}
+---
+# Grant cert-manager-webhook-pcloud permission to read the flow control mechanism (APF)
+# API Priority and Fairness is enabled by default in Kubernetes 1.20
+# https://kubernetes.io/docs/concepts/cluster-administration/flow-control/
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:flowcontrol-solver
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+ - apiGroups:
+ - "flowcontrol.apiserver.k8s.io"
+ resources:
+ - "prioritylevelconfigurations"
+ - "flowschemas"
+ verbs:
+ - "list"
+ - "watch"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:flowcontrol-solver
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}:flowcontrol-solver
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+{{- end }}
diff --git a/charts/cert-manager-webhook-pcloud/templates/role.yaml b/charts/cert-manager-webhook-pcloud/templates/role.yaml
new file mode 100644
index 0000000..9c18ba1
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/templates/role.yaml
@@ -0,0 +1,24 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cert-manager-{{ .Values.resolverName }}
+rules:
+- apiGroups:
+ - {{ .Values.apiGroupName }}
+ resources:
+ - {{ .Values.resolverName }}
+ verbs:
+ - "*" # TODO(giolekva): limit
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cert-manager-pcloud-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cert-manager-{{ .Values.resolverName }}
+subjects:
+- kind: ServiceAccount
+ name: {{ .Values.certManager.serviceAccountName }}
+ namespace: {{ .Values.certManager.namespace | quote }}
diff --git a/charts/cert-manager-webhook-pcloud/templates/service.yaml b/charts/cert-manager-webhook-pcloud/templates/service.yaml
new file mode 100644
index 0000000..022dbf1
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/templates/service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "cert-manager-webhook-pcloud.fullname" . }}
+ namespace: {{ .Values.certManager.namespace | quote }}
+ labels:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ chart: {{ include "cert-manager-webhook-pcloud.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: https
+ protocol: TCP
+ name: https
+ selector:
+ app: {{ include "cert-manager-webhook-pcloud.name" . }}
+ release: {{ .Release.Name }}
\ No newline at end of file
diff --git a/charts/cert-manager-webhook-pcloud/values.yaml b/charts/cert-manager-webhook-pcloud/values.yaml
new file mode 100644
index 0000000..fccfcd7
--- /dev/null
+++ b/charts/cert-manager-webhook-pcloud/values.yaml
@@ -0,0 +1,21 @@
+apiGroupName: ""
+resolverName: ""
+logLevel: 6
+certManager:
+ namespace: cert-manager
+ serviceAccountName: cert-manager
+image:
+ repository: giolekva/dns-challenge-solver
+ tag: latest
+ pullPolicy: Always
+nameOverride: ""
+fullnameOverride: ""
+service:
+ type: ClusterIP
+ port: 443
+features:
+ apiPriorityAndFairness: false
+resources: {}
+nodeSelector: {}
+tolerations: []
+affinity: {}
diff --git a/charts/cert-manager/Chart.yaml b/charts/cert-manager/Chart.yaml
new file mode 100644
index 0000000..a7c8fe6
--- /dev/null
+++ b/charts/cert-manager/Chart.yaml
@@ -0,0 +1,24 @@
+annotations:
+ artifacthub.io/prerelease: "false"
+ artifacthub.io/signKey: |
+ fingerprint: 1020CF3C033D4F35BAE1C19E1226061C665DF13E
+ url: https://cert-manager.io/public-keys/cert-manager-keyring-2021-09-20-1020CF3C033D4F35BAE1C19E1226061C665DF13E.gpg
+apiVersion: v1
+appVersion: v1.12.2
+description: A Helm chart for cert-manager
+home: https://github.com/cert-manager/cert-manager
+icon: https://raw.githubusercontent.com/cert-manager/cert-manager/d53c0b9270f8cd90d908460d69502694e1838f5f/logo/logo-small.png
+keywords:
+- cert-manager
+- kube-lego
+- letsencrypt
+- tls
+kubeVersion: '>= 1.22.0-0'
+maintainers:
+- email: cert-manager-maintainers@googlegroups.com
+ name: cert-manager-maintainers
+ url: https://cert-manager.io
+name: cert-manager
+sources:
+- https://github.com/cert-manager/cert-manager
+version: v1.12.2
diff --git a/charts/cert-manager/README.md b/charts/cert-manager/README.md
new file mode 100644
index 0000000..8507a8a
--- /dev/null
+++ b/charts/cert-manager/README.md
@@ -0,0 +1,271 @@
+# cert-manager
+
+cert-manager is a Kubernetes addon to automate the management and issuance of
+TLS certificates from various issuing sources.
+
+It will ensure certificates are valid and up to date periodically, and attempt
+to renew certificates at an appropriate time before expiry.
+
+## Prerequisites
+
+- Kubernetes 1.20+
+
+## Installing the Chart
+
+Full installation instructions, including details on how to configure extra
+functionality in cert-manager can be found in the [installation docs](https://cert-manager.io/docs/installation/kubernetes/).
+
+Before installing the chart, you must first install the cert-manager CustomResourceDefinition resources.
+This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources.
+
+```bash
+$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.2/cert-manager.crds.yaml
+```
+
+To install the chart with the release name `my-release`:
+
+```console
+## Add the Jetstack Helm repository
+$ helm repo add jetstack https://charts.jetstack.io
+
+## Install the cert-manager helm chart
+$ helm install my-release --namespace cert-manager --version v1.12.2 jetstack/cert-manager
+```
+
+In order to begin issuing certificates, you will need to set up a ClusterIssuer
+or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).
+
+More information on the different types of issuers and how to configure them
+can be found in [our documentation](https://cert-manager.io/docs/configuration/).
+
+For information on how to configure cert-manager to automatically provision
+Certificates for Ingress resources, take a look at the
+[Securing Ingresses documentation](https://cert-manager.io/docs/usage/ingress/).
+
+> **Tip**: List all releases using `helm list`
+
+## Upgrading the Chart
+
+Special considerations may be required when upgrading the Helm chart, and these
+are documented in our full [upgrading guide](https://cert-manager.io/docs/installation/upgrading/).
+
+**Please check here before performing upgrades!**
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+If you want to completely uninstall cert-manager from your cluster, you will also need to
+delete the previously installed CustomResourceDefinition resources:
+
+```console
+$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.2/cert-manager.crds.yaml
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the cert-manager chart and their default values.
+
+| Parameter | Description | Default |
+| --------- | ----------- | ------- |
+| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` |
+| `global.commonLabels` | Labels to apply to all resources | `{}` |
+| `global.rbac.create` | If `true`, create and use RBAC resources (includes sub-charts) | `true` |
+| `global.priorityClassName`| Priority class name for cert-manager and webhook pods | `""` |
+| `global.podSecurityPolicy.enabled` | If `true`, create and use PodSecurityPolicy (includes sub-charts) | `false` |
+| `global.podSecurityPolicy.useAppArmor` | If `true`, use Apparmor seccomp profile in PSP | `true` |
+| `global.leaderElection.namespace` | Override the namespace used to store the ConfigMap for leader election | `kube-system` |
+| `global.leaderElection.leaseDuration` | The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate | |
+| `global.leaderElection.renewDeadline` | The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration | |
+| `global.leaderElection.retryPeriod` | The duration the clients should wait between attempting acquisition and renewal of a leadership | |
+| `installCRDs` | If true, CRD resources will be installed as part of the Helm chart. If enabled, when uninstalling CRD resources will be deleted causing all installed custom resources to be DELETED | `false` |
+| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` |
+| `image.tag` | Image tag | `v1.12.2` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `replicaCount` | Number of cert-manager replicas | `1` |
+| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod |
+| `featureGates` | Set of comma-separated key=value pairs that describe feature gates on the controller. Some feature gates may also have to be enabled on other components, and can be set supplying the `feature-gate` flag to `<component>.extraArgs` | `` |
+| `extraArgs` | Optional flags for cert-manager | `[]` |
+| `extraEnv` | Optional environment variables for cert-manager | `[]` |
+| `serviceAccount.create` | If `true`, create a new service account | `true` |
+| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | |
+| `serviceAccount.annotations` | Annotations to add to the service account | |
+| `serviceAccount.automountServiceAccountToken` | Automount API credentials for the Service Account | `true` |
+| `volumes` | Optional volumes for cert-manager | `[]` |
+| `volumeMounts` | Optional volume mounts for cert-manager | `[]` |
+| `resources` | CPU/memory resource requests/limits | `{}` |
+| `securityContext` | Security context for the controller pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
+| `containerSecurityContext` | Security context to be set on the controller component container | refer to [Default Security Contexts](#default-security-contexts) |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `affinity` | Node affinity for pod assignment | `{}` |
+| `tolerations` | Node tolerations for pod assignment | `[]` |
+| `topologySpreadConstraints` | Topology spread constraints for pod assignment | `[]` |
+| `livenessProbe.enabled` | Enable or disable the liveness probe for the controller container in the controller Pod. See https://cert-manager.io/docs/installation/best-practice/ to learn about when you might want to enable this liveness probe. | `false` |
+| `livenessProbe.initialDelaySeconds` | The liveness probe initial delay (in seconds) | `10` |
+| `livenessProbe.periodSeconds` | The liveness probe period (in seconds) | `10` |
+| `livenessProbe.timeoutSeconds` | The liveness probe timeout (in seconds) | `10` |
+| `livenessProbe.periodSeconds` | The liveness probe period (in seconds) | `10` |
+| `livenessProbe.successThreshold` | The liveness probe success threshold | `1` |
+| `livenessProbe.failureThreshold` | The liveness probe failure threshold | `8` |
+| `ingressShim.defaultIssuerName` | Optional default issuer to use for ingress resources | |
+| `ingressShim.defaultIssuerKind` | Optional default issuer kind to use for ingress resources | |
+| `ingressShim.defaultIssuerGroup` | Optional default issuer group to use for ingress resources | |
+| `prometheus.enabled` | Enable Prometheus monitoring | `true` |
+| `prometheus.servicemonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` |
+| `prometheus.servicemonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) |
+| `prometheus.servicemonitor.prometheusInstance` | Prometheus Instance definition | `default` |
+| `prometheus.servicemonitor.targetPort` | Prometheus scrape port | `9402` |
+| `prometheus.servicemonitor.path` | Prometheus scrape path | `/metrics` |
+| `prometheus.servicemonitor.interval` | Prometheus scrape interval | `60s` |
+| `prometheus.servicemonitor.labels` | Add custom labels to ServiceMonitor | |
+| `prometheus.servicemonitor.scrapeTimeout` | Prometheus scrape timeout | `30s` |
+| `prometheus.servicemonitor.honorLabels` | Enable label honoring for metrics scraped by Prometheus (see [Prometheus scrape config docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) for details). By setting `honorLabels` to `true`, Prometheus will prefer label contents given by cert-manager on conflicts. Can be used to remove the "exported_namespace" label for example. | `false` |
+| `podAnnotations` | Annotations to add to the cert-manager pod | `{}` |
+| `deploymentAnnotations` | Annotations to add to the cert-manager deployment | `{}` |
+| `podDisruptionBudget.enabled` | Adds a PodDisruptionBudget for the cert-manager deployment | `false` |
+| `podDisruptionBudget.minAvailable` | Configures the minimum available pods for voluntary disruptions. Cannot be used if `maxUnavailable` is set. | `1` |
+| `podDisruptionBudget.maxUnavailable` | Configures the maximum unavailable pods for voluntary disruptions. Cannot be used if `minAvailable` is set. | |
+| `podDnsPolicy` | Optional cert-manager pod [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy) | |
+| `podDnsConfig` | Optional cert-manager pod [DNS configurations](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) | |
+| `podLabels` | Labels to add to the cert-manager pod | `{}` |
+| `serviceLabels` | Labels to add to the cert-manager controller service | `{}` |
+| `serviceAnnotations` | Annotations to add to the cert-manager service | `{}` |
+| `http_proxy` | Value of the `HTTP_PROXY` environment variable in the cert-manager pod | |
+| `https_proxy` | Value of the `HTTPS_PROXY` environment variable in the cert-manager pod | |
+| `no_proxy` | Value of the `NO_PROXY` environment variable in the cert-manager pod | |
+| `dns01RecursiveNameservers` | Comma separated string with host and port of the recursive nameservers cert-manager should query | `` |
+| `dns01RecursiveNameserversOnly` | Forces cert-manager to only use the recursive nameservers for verification. | `false` |
+| `enableCertificateOwnerRef` | When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted | `false` |
+| `webhook.replicaCount` | Number of cert-manager webhook replicas | `1` |
+| `webhook.timeoutSeconds` | Seconds the API server should wait the webhook to respond before treating the call as a failure. | `10` |
+| `webhook.podAnnotations` | Annotations to add to the webhook pods | `{}` |
+| `webhook.podLabels` | Labels to add to the cert-manager webhook pod | `{}` |
+| `webhook.serviceLabels` | Labels to add to the cert-manager webhook service | `{}` |
+| `webhook.deploymentAnnotations` | Annotations to add to the webhook deployment | `{}` |
+| `webhook.podDisruptionBudget.enabled` | Adds a PodDisruptionBudget for the webhook deployment | `false` |
+| `webhook.podDisruptionBudget.minAvailable` | Configures the minimum available pods for voluntary disruptions. Cannot be used if `maxUnavailable` is set. | `1` |
+| `webhook.podDisruptionBudget.maxUnavailable` | Configures the maximum unavailable pods for voluntary disruptions. Cannot be used if `minAvailable` is set. | |
+| `webhook.mutatingWebhookConfigurationAnnotations` | Annotations to add to the mutating webhook configuration | `{}` |
+| `webhook.validatingWebhookConfigurationAnnotations` | Annotations to add to the validating webhook configuration | `{}` |
+| `webhook.serviceAnnotations` | Annotations to add to the webhook service | `{}` |
+| `webhook.config` | WebhookConfiguration YAML used to configure flags for the webhook. Generates a ConfigMap containing contents of the field. See `values.yaml` for example. | `{}` |
+| `webhook.extraArgs` | Optional flags for cert-manager webhook component | `[]` |
+| `webhook.serviceAccount.create` | If `true`, create a new service account for the webhook component | `true` |
+| `webhook.serviceAccount.name` | Service account for the webhook component to be used. If not set and `webhook.serviceAccount.create` is `true`, a name is generated using the fullname template | |
+| `webhook.serviceAccount.annotations` | Annotations to add to the service account for the webhook component | |
+| `webhook.serviceAccount.automountServiceAccountToken` | Automount API credentials for the webhook Service Account | |
+| `webhook.resources` | CPU/memory resource requests/limits for the webhook pods | `{}` |
+| `webhook.nodeSelector` | Node labels for webhook pod assignment | `{}` |
+| `webhook.networkPolicy.enabled` | Enable default network policies for webhooks egress and ingress traffic | `false` |
+| `webhook.networkPolicy.ingress` | Sets ingress policy block. See NetworkPolicy documentation. See `values.yaml` for example. | `{}` |
+| `webhook.networkPolicy.egress` | Sets egress policy block. See NetworkPolicy documentation. See `values.yaml` for example. | `{}` |
+| `webhook.affinity` | Node affinity for webhook pod assignment | `{}` |
+| `webhook.tolerations` | Node tolerations for webhook pod assignment | `[]` |
+| `webhook.topologySpreadConstraints` | Topology spread constraints for webhook pod assignment | `[]` |
+| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` |
+| `webhook.image.tag` | Webhook image tag | `v1.12.2` |
+| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` |
+| `webhook.securePort` | The port that the webhook should listen on for requests. | `10250` |
+| `webhook.securityContext` | Security context for webhook pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
+| `webhook.containerSecurityContext` | Security context to be set on the webhook component container | refer to [Default Security Contexts](#default-security-contexts) |
+| `webhook.hostNetwork` | If `true`, run the Webhook on the host network. | `false` |
+| `webhook.serviceType` | The type of the `Service`. | `ClusterIP` |
+| `webhook.loadBalancerIP` | The specific load balancer IP to use (when `serviceType` is `LoadBalancer`). | |
+| `webhook.url.host` | The host to use to reach the webhook, instead of using internal cluster DNS for the service. | |
+| `webhook.livenessProbe.failureThreshold` | The liveness probe failure threshold | `3` |
+| `webhook.livenessProbe.initialDelaySeconds` | The liveness probe initial delay (in seconds) | `60` |
+| `webhook.livenessProbe.periodSeconds` | The liveness probe period (in seconds) | `10` |
+| `webhook.livenessProbe.successThreshold` | The liveness probe success threshold | `1` |
+| `webhook.livenessProbe.timeoutSeconds` | The liveness probe timeout (in seconds) | `1` |
+| `webhook.readinessProbe.failureThreshold` | The readiness probe failure threshold | `3` |
+| `webhook.readinessProbe.initialDelaySeconds` | The readiness probe initial delay (in seconds) | `5` |
+| `webhook.readinessProbe.periodSeconds` | The readiness probe period (in seconds) | `5` |
+| `webhook.readinessProbe.successThreshold` | The readiness probe success threshold | `1` |
+| `webhook.readinessProbe.timeoutSeconds` | The readiness probe timeout (in seconds) | `1` |
+| `cainjector.enabled` | Toggles whether the cainjector component should be installed (required for the webhook component to work) | `true` |
+| `cainjector.replicaCount` | Number of cert-manager cainjector replicas | `1` |
+| `cainjector.podAnnotations` | Annotations to add to the cainjector pods | `{}` |
+| `cainjector.podLabels` | Labels to add to the cert-manager cainjector pod | `{}` |
+| `cainjector.deploymentAnnotations` | Annotations to add to the cainjector deployment | `{}` |
+| `cainjector.podDisruptionBudget.enabled` | Adds a PodDisruptionBudget for the cainjector deployment | `false` |
+| `cainjector.podDisruptionBudget.minAvailable` | Configures the minimum available pods for voluntary disruptions. Cannot be used if `maxUnavailable` is set. | `1` |
+| `cainjector.podDisruptionBudget.maxUnavailable` | Configures the maximum unavailable pods for voluntary disruptions. Cannot be used if `minAvailable` is set. | |
+| `cainjector.extraArgs` | Optional flags for cert-manager cainjector component | `[]` |
+| `cainjector.serviceAccount.create` | If `true`, create a new service account for the cainjector component | `true` |
+| `cainjector.serviceAccount.name` | Service account for the cainjector component to be used. If not set and `cainjector.serviceAccount.create` is `true`, a name is generated using the fullname template | |
+| `cainjector.serviceAccount.annotations` | Annotations to add to the service account for the cainjector component | |
+| `cainjector.serviceAccount.automountServiceAccountToken` | Automount API credentials for the cainjector Service Account | `true` |
+| `cainjector.resources` | CPU/memory resource requests/limits for the cainjector pods | `{}` |
+| `cainjector.nodeSelector` | Node labels for cainjector pod assignment | `{}` |
+| `cainjector.affinity` | Node affinity for cainjector pod assignment | `{}` |
+| `cainjector.tolerations` | Node tolerations for cainjector pod assignment | `[]` |
+| `cainjector.topologySpreadConstraints` | Topology spread constraints for cainjector pod assignment | `[]` |
+| `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` |
+| `cainjector.image.tag` | cainjector image tag | `v1.12.2` |
+| `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` |
+| `cainjector.securityContext` | Security context for cainjector pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
+| `cainjector.containerSecurityContext` | Security context to be set on cainjector component container | refer to [Default Security Contexts](#default-security-contexts) |
+| `acmesolver.image.repository` | acmesolver image repository | `quay.io/jetstack/cert-manager-acmesolver` |
+| `acmesolver.image.tag` | acmesolver image tag | `v1.12.2` |
+| `acmesolver.image.pullPolicy` | acmesolver image pull policy | `IfNotPresent` |
+| `startupapicheck.enabled` | Toggles whether the startupapicheck Job should be installed | `true` |
+| `startupapicheck.securityContext` | Security context for startupapicheck pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
+| `startupapicheck.containerSecurityContext` | Security context to be set on startupapicheck component container | refer to [Default Security Contexts](#default-security-contexts) |
+| `startupapicheck.timeout` | Timeout for 'kubectl check api' command | `1m` |
+| `startupapicheck.backoffLimit` | Job backoffLimit | `4` |
+| `startupapicheck.jobAnnotations` | Optional additional annotations to add to the startupapicheck Job | `{}` |
+| `startupapicheck.podAnnotations` | Optional additional annotations to add to the startupapicheck Pods | `{}` |
+| `startupapicheck.extraArgs` | Optional additional arguments for startupapicheck | `[]` |
+| `startupapicheck.resources` | CPU/memory resource requests/limits for the startupapicheck pod | `{}` |
+| `startupapicheck.nodeSelector` | Node labels for startupapicheck pod assignment | `{}` |
+| `startupapicheck.affinity` | Node affinity for startupapicheck pod assignment | `{}` |
+| `startupapicheck.tolerations` | Node tolerations for startupapicheck pod assignment | `[]` |
+| `startupapicheck.podLabels` | Optional additional labels to add to the startupapicheck Pods | `{}` |
+| `startupapicheck.image.repository` | startupapicheck image repository | `quay.io/jetstack/cert-manager-ctl` |
+| `startupapicheck.image.tag` | startupapicheck image tag | `v1.12.2` |
+| `startupapicheck.image.pullPolicy` | startupapicheck image pull policy | `IfNotPresent` |
+| `startupapicheck.serviceAccount.create` | If `true`, create a new service account for the startupapicheck component | `true` |
+| `startupapicheck.serviceAccount.name` | Service account for the startupapicheck component to be used. If not set and `startupapicheck.serviceAccount.create` is `true`, a name is generated using the fullname template | |
+| `startupapicheck.serviceAccount.annotations` | Annotations to add to the service account for the startupapicheck component | |
+| `startupapicheck.serviceAccount.automountServiceAccountToken` | Automount API credentials for the startupapicheck Service Account | `true` |
+| `maxConcurrentChallenges` | The maximum number of challenges that can be scheduled as 'processing' at once | `60` |
+
+### Default Security Contexts
+
+The default pod-level and container-level security contexts, below, adhere to the [restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) Pod Security Standards policies.
+
+Default pod-level securityContext:
+```yaml
+runAsNonRoot: true
+seccompProfile:
+ type: RuntimeDefault
+```
+
+Default containerSecurityContext:
+```yaml
+allowPrivilegeEscalation: false
+capabilities:
+ drop:
+ - ALL
+```
+
+### Assigning Values
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml .
+```
+> **Tip**: You can use the default [values.yaml](https://github.com/cert-manager/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml)
+
+## Contributing
+
+This chart is maintained at [github.com/cert-manager/cert-manager](https://github.com/cert-manager/cert-manager/tree/master/deploy/charts/cert-manager).
diff --git a/charts/cert-manager/templates/NOTES.txt b/charts/cert-manager/templates/NOTES.txt
new file mode 100644
index 0000000..1025354
--- /dev/null
+++ b/charts/cert-manager/templates/NOTES.txt
@@ -0,0 +1,15 @@
+cert-manager {{ .Chart.AppVersion }} has been deployed successfully!
+
+In order to begin issuing certificates, you will need to set up a ClusterIssuer
+or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).
+
+More information on the different types of issuers and how to configure them
+can be found in our documentation:
+
+https://cert-manager.io/docs/configuration/
+
+For information on how to configure cert-manager to automatically provision
+Certificates for Ingress resources, take a look at the `ingress-shim`
+documentation:
+
+https://cert-manager.io/docs/usage/ingress/
diff --git a/charts/cert-manager/templates/_helpers.tpl b/charts/cert-manager/templates/_helpers.tpl
new file mode 100644
index 0000000..90db4af
--- /dev/null
+++ b/charts/cert-manager/templates/_helpers.tpl
@@ -0,0 +1,174 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cert-manager.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "cert-manager.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "cert-manager.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "cert-manager.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Webhook templates
+*/}}
+
+{{/*
+Expand the name of the chart.
+Manually fix the 'app' and 'name' labels to 'webhook' to maintain
+compatibility with the v0.9 deployment selector.
+*/}}
+{{- define "webhook.name" -}}
+{{- printf "webhook" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "webhook.fullname" -}}
+{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 55 | trimSuffix "-" -}}
+{{- printf "%s-webhook" $trimmedName | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "webhook.caRef" -}}
+{{- template "cert-manager.namespace" }}/{{ template "webhook.fullname" . }}-ca
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "webhook.serviceAccountName" -}}
+{{- if .Values.webhook.serviceAccount.create -}}
+ {{ default (include "webhook.fullname" .) .Values.webhook.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.webhook.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+cainjector templates
+*/}}
+
+{{/*
+Expand the name of the chart.
+Manually fix the 'app' and 'name' labels to 'cainjector' to maintain
+compatibility with the v0.9 deployment selector.
+*/}}
+{{- define "cainjector.name" -}}
+{{- printf "cainjector" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cainjector.fullname" -}}
+{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 52 | trimSuffix "-" -}}
+{{- printf "%s-cainjector" $trimmedName | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "cainjector.serviceAccountName" -}}
+{{- if .Values.cainjector.serviceAccount.create -}}
+ {{ default (include "cainjector.fullname" .) .Values.cainjector.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.cainjector.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+startupapicheck templates
+*/}}
+
+{{/*
+Expand the name of the chart.
+Manually fix the 'app' and 'name' labels to 'startupapicheck' to maintain
+compatibility with the v0.9 deployment selector.
+*/}}
+{{- define "startupapicheck.name" -}}
+{{- printf "startupapicheck" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "startupapicheck.fullname" -}}
+{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 52 | trimSuffix "-" -}}
+{{- printf "%s-startupapicheck" $trimmedName | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "startupapicheck.serviceAccountName" -}}
+{{- if .Values.startupapicheck.serviceAccount.create -}}
+ {{ default (include "startupapicheck.fullname" .) .Values.startupapicheck.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.startupapicheck.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "chartName" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Labels that should be added on each resource
+*/}}
+{{- define "labels" -}}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- if eq (default "helm" .Values.creator) "helm" }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+helm.sh/chart: {{ include "chartName" . }}
+{{- end -}}
+{{- if .Values.global.commonLabels}}
+{{ toYaml .Values.global.commonLabels }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Namespace for all resources to be installed into
+If not defined in values file then the helm release namespace is used
+By default this is not set so the helm release namespace will be used
+
+This gets around a problem within helm discussed here
+https://github.com/helm/helm/issues/5358
+*/}}
+{{- define "cert-manager.namespace" -}}
+ {{ .Values.namespace | default .Release.Namespace }}
+{{- end -}}
diff --git a/charts/cert-manager/templates/cainjector-deployment.yaml b/charts/cert-manager/templates/cainjector-deployment.yaml
new file mode 100644
index 0000000..1220173
--- /dev/null
+++ b/charts/cert-manager/templates/cainjector-deployment.yaml
@@ -0,0 +1,117 @@
+{{- if .Values.cainjector.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "cainjector.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.cainjector.deploymentAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.cainjector.replicaCount }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- with .Values.cainjector.strategy }}
+ strategy:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 8 }}
+ {{- with .Values.cainjector.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.cainjector.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ serviceAccountName: {{ template "cainjector.serviceAccountName" . }}
+ {{- if hasKey .Values.cainjector "automountServiceAccountToken" }}
+ automountServiceAccountToken: {{ .Values.cainjector.automountServiceAccountToken }}
+ {{- end }}
+ {{- with .Values.global.priorityClassName }}
+ priorityClassName: {{ . | quote }}
+ {{- end }}
+ {{- with .Values.cainjector.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}-cainjector
+ {{- with .Values.cainjector.image }}
+ image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.cainjector.image.pullPolicy }}
+ args:
+ {{- if .Values.global.logLevel }}
+ - --v={{ .Values.global.logLevel }}
+ {{- end }}
+ {{- with .Values.global.leaderElection }}
+ - --leader-election-namespace={{ .namespace }}
+ {{- if .leaseDuration }}
+ - --leader-election-lease-duration={{ .leaseDuration }}
+ {{- end }}
+ {{- if .renewDeadline }}
+ - --leader-election-renew-deadline={{ .renewDeadline }}
+ {{- end }}
+ {{- if .retryPeriod }}
+ - --leader-election-retry-period={{ .retryPeriod }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.cainjector.extraArgs }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- with .Values.cainjector.containerSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.cainjector.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.cainjector.volumeMounts }}
+ volumeMounts:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.cainjector.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.cainjector.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.cainjector.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.cainjector.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.cainjector.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/cainjector-poddisruptionbudget.yaml b/charts/cert-manager/templates/cainjector-poddisruptionbudget.yaml
new file mode 100644
index 0000000..f080b75
--- /dev/null
+++ b/charts/cert-manager/templates/cainjector-poddisruptionbudget.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.cainjector.podDisruptionBudget.enabled }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "cainjector.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+
+ {{- with .Values.cainjector.podDisruptionBudget.minAvailable }}
+ minAvailable: {{ . }}
+ {{- end }}
+ {{- with .Values.cainjector.podDisruptionBudget.maxUnavailable }}
+ maxUnavailable: {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/cainjector-psp-clusterrole.yaml b/charts/cert-manager/templates/cainjector-psp-clusterrole.yaml
new file mode 100644
index 0000000..b75b9eb
--- /dev/null
+++ b/charts/cert-manager/templates/cainjector-psp-clusterrole.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.cainjector.enabled }}
+{{- if .Values.global.podSecurityPolicy.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "cainjector.fullname" . }}-psp
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+rules:
+- apiGroups: ['policy']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "cainjector.fullname" . }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/cainjector-psp-clusterrolebinding.yaml b/charts/cert-manager/templates/cainjector-psp-clusterrolebinding.yaml
new file mode 100644
index 0000000..e2bfa26
--- /dev/null
+++ b/charts/cert-manager/templates/cainjector-psp-clusterrolebinding.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.cainjector.enabled }}
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cainjector.fullname" . }}-psp
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cainjector.fullname" . }}-psp
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "cainjector.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/cainjector-psp.yaml b/charts/cert-manager/templates/cainjector-psp.yaml
new file mode 100644
index 0000000..24f01da
--- /dev/null
+++ b/charts/cert-manager/templates/cainjector-psp.yaml
@@ -0,0 +1,51 @@
+{{- if .Values.cainjector.enabled }}
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "cainjector.fullname" . }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ {{- if .Values.global.podSecurityPolicy.useAppArmor }}
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ {{- end }}
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ allowedCapabilities: [] # default set of capabilities are implicitly allowed
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/cainjector-rbac.yaml b/charts/cert-manager/templates/cainjector-rbac.yaml
new file mode 100644
index 0000000..2aa59ee
--- /dev/null
+++ b/charts/cert-manager/templates/cainjector-rbac.yaml
@@ -0,0 +1,103 @@
+{{- if .Values.cainjector.enabled }}
+{{- if .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cainjector.fullname" . }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "create", "update", "patch"]
+ - apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["apiregistration.k8s.io"]
+ resources: ["apiservices"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cainjector.fullname" . }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cainjector.fullname" . }}
+subjects:
+ - name: {{ template "cainjector.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+# leader election rules
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "cainjector.fullname" . }}:leaderelection
+ namespace: {{ .Values.global.leaderElection.namespace }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ # Used for leader election by the controller
+ # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller
+ # see cmd/cainjector/start.go#L113
+ # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller
+ # see cmd/cainjector/start.go#L137
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"]
+ verbs: ["get", "update", "patch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["create"]
+
+---
+
+# grant cert-manager's cainjector permission to manage the leader election
+# Lease resources (coordination.k8s.io) in the leader election namespace
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "cainjector.fullname" . }}:leaderelection
+ namespace: {{ .Values.global.leaderElection.namespace }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "cainjector.fullname" . }}:leaderelection
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "cainjector.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/cainjector-serviceaccount.yaml b/charts/cert-manager/templates/cainjector-serviceaccount.yaml
new file mode 100644
index 0000000..fedc731
--- /dev/null
+++ b/charts/cert-manager/templates/cainjector-serviceaccount.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.cainjector.enabled }}
+{{- if .Values.cainjector.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: {{ .Values.cainjector.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "cainjector.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ {{- with .Values.cainjector.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ labels:
+ app: {{ include "cainjector.name" . }}
+ app.kubernetes.io/name: {{ include "cainjector.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cainjector"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.cainjector.serviceAccount.labels }}
+ {{ toYaml . | nindent 4 }}
+ {{- end }}
+{{- with .Values.global.imagePullSecrets }}
+imagePullSecrets:
+ {{- toYaml . | nindent 2 }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/crds.yaml b/charts/cert-manager/templates/crds.yaml
new file mode 100644
index 0000000..8206987
--- /dev/null
+++ b/charts/cert-manager/templates/crds.yaml
@@ -0,0 +1,4462 @@
+{{- if .Values.installCRDs }}
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: certificaterequests.cert-manager.io
+ labels:
+ app: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/name: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/instance: '{{ .Release.Name }}'
+ # Generated labels {{- include "labels" . | nindent 4 }}
+spec:
+ group: cert-manager.io
+ names:
+ kind: CertificateRequest
+ listKind: CertificateRequestList
+ plural: certificaterequests
+ shortNames:
+ - cr
+ - crs
+ singular: certificaterequest
+ categories:
+ - cert-manager
+ scope: Namespaced
+ versions:
+ - name: v1
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=="Approved")].status
+ name: Approved
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Denied")].status
+ name: Denied
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - jsonPath: .spec.issuerRef.name
+ name: Issuer
+ type: string
+ - jsonPath: .spec.username
+ name: Requestor
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Status
+ priority: 1
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ name: Age
+ type: date
+ schema:
+ openAPIV3Schema:
+ description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used."
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Desired state of the CertificateRequest resource.
+ type: object
+ required:
+ - issuerRef
+ - request
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types.
+ type: string
+ extra:
+ description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
+ type: object
+ additionalProperties:
+ type: array
+ items:
+ type: string
+ groups:
+ description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
+ type: array
+ items:
+ type: string
+ x-kubernetes-list-type: atomic
+ isCA:
+ description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`.
+ type: boolean
+ issuerRef:
+ description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty.
+ type: object
+ required:
+ - name
+ properties:
+ group:
+ description: Group of the resource being referred to.
+ type: string
+ kind:
+ description: Kind of the resource being referred to.
+ type: string
+ name:
+ description: Name of the resource being referred to.
+ type: string
+ request:
+ description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing.
+ type: string
+ format: byte
+ uid:
+ description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
+ type: string
+ usages:
+ description: Usages is the set of x509 usages that are requested for the certificate. If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified.
+ type: array
+ items:
+ description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\""
+ type: string
+ enum:
+ - signing
+ - digital signature
+ - content commitment
+ - key encipherment
+ - key agreement
+ - data encipherment
+ - cert sign
+ - crl sign
+ - encipher only
+ - decipher only
+ - any
+ - server auth
+ - client auth
+ - code signing
+ - email protection
+ - s/mime
+ - ipsec end system
+ - ipsec tunnel
+ - ipsec user
+ - timestamping
+ - ocsp signing
+ - microsoft sgc
+ - netscape sgc
+ username:
+ description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
+ type: string
+ status:
+ description: Status of the CertificateRequest. This is set and managed automatically.
+ type: object
+ properties:
+ ca:
+ description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available.
+ type: string
+ format: byte
+ certificate:
+ description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field.
+ type: string
+ format: byte
+ conditions:
+ description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready` and `InvalidRequest`.
+ type: array
+ items:
+ description: CertificateRequestCondition contains condition information for a CertificateRequest.
+ type: object
+ required:
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: LastTransitionTime is the timestamp corresponding to the last status change of this condition.
+ type: string
+ format: date-time
+ message:
+ description: Message is a human readable description of the details of the last transition, complementing reason.
+ type: string
+ reason:
+ description: Reason is a brief machine readable explanation for the condition's last transition.
+ type: string
+ status:
+ description: Status of the condition, one of (`True`, `False`, `Unknown`).
+ type: string
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type:
+ description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`).
+ type: string
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ failureTime:
+ description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off.
+ type: string
+ format: date-time
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: certificates.cert-manager.io
+ labels:
+ app: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/name: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/instance: '{{ .Release.Name }}'
+ # Generated labels {{- include "labels" . | nindent 4 }}
+spec:
+ group: cert-manager.io
+ names:
+ kind: Certificate
+ listKind: CertificateList
+ plural: certificates
+ shortNames:
+ - cert
+ - certs
+ singular: certificate
+ categories:
+ - cert-manager
+ scope: Namespaced
+ versions:
+ - name: v1
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - jsonPath: .spec.secretName
+ name: Secret
+ type: string
+ - jsonPath: .spec.issuerRef.name
+ name: Issuer
+ priority: 1
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Status
+ priority: 1
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ name: Age
+ type: date
+ schema:
+ openAPIV3Schema:
+ description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)."
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Desired state of the Certificate resource.
+ type: object
+ required:
+ - issuerRef
+ - secretName
+ properties:
+ additionalOutputFormats:
+ description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components.
+ type: array
+ items:
+ description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key.
+ type: object
+ required:
+ - type
+ properties:
+ type:
+ description: Type is the name of the format type that should be written to the Certificate's target Secret.
+ type: string
+ enum:
+ - DER
+ - CombinedPEM
+ commonName:
+ description: 'CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4'
+ type: string
+ dnsNames:
+ description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate.
+ type: array
+ items:
+ type: string
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration
+ type: string
+ emailAddresses:
+ description: EmailAddresses is a list of email subjectAltNames to be set on the Certificate.
+ type: array
+ items:
+ type: string
+ encodeUsagesInRequest:
+ description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest
+ type: boolean
+ ipAddresses:
+ description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate.
+ type: array
+ items:
+ type: string
+ isCA:
+ description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`.
+ type: boolean
+ issuerRef:
+ description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times.
+ type: object
+ required:
+ - name
+ properties:
+ group:
+ description: Group of the resource being referred to.
+ type: string
+ kind:
+ description: Kind of the resource being referred to.
+ type: string
+ name:
+ description: Name of the resource being referred to.
+ type: string
+ keystores:
+ description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource.
+ type: object
+ properties:
+ jks:
+ description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource.
+ type: object
+ required:
+ - create
+ - passwordSecretRef
+ properties:
+ create:
+ description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. If the issuer provided a CA certificate, a file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority
+ type: boolean
+ passwordSecretRef:
+ description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ pkcs12:
+ description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource.
+ type: object
+ required:
+ - create
+ - passwordSecretRef
+ properties:
+ create:
+ description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. If the issuer provided a CA certificate, a file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority
+ type: boolean
+ passwordSecretRef:
+ description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ literalSubject:
+ description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook.
+ type: string
+ privateKey:
+ description: Options to control private keys used for the Certificate.
+ type: object
+ properties:
+ algorithm:
+ description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA`,`Ed25519` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm.
+ type: string
+ enum:
+ - RSA
+ - ECDSA
+ - Ed25519
+ encoding:
+ description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified.
+ type: string
+ enum:
+ - PKCS1
+ - PKCS8
+ rotationPolicy:
+ description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility.
+ type: string
+ enum:
+ - Never
+ - Always
+ size:
+ description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed.
+ type: integer
+ renewBefore:
+ description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration
+ type: string
+ revisionHistoryLimit:
+ description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`.
+ type: integer
+ format: int32
+ secretName:
+ description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer.
+ type: string
+ secretTemplate:
+ description: SecretTemplate defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret.
+ type: object
+ properties:
+ annotations:
+ description: Annotations is a key value map to be copied to the target Kubernetes Secret.
+ type: object
+ additionalProperties:
+ type: string
+ labels:
+ description: Labels is a key value map to be copied to the target Kubernetes Secret.
+ type: object
+ additionalProperties:
+ type: string
+ subject:
+ description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name).
+ type: object
+ properties:
+ countries:
+ description: Countries to be used on the Certificate.
+ type: array
+ items:
+ type: string
+ localities:
+ description: Cities to be used on the Certificate.
+ type: array
+ items:
+ type: string
+ organizationalUnits:
+ description: Organizational Units to be used on the Certificate.
+ type: array
+ items:
+ type: string
+ organizations:
+ description: Organizations to be used on the Certificate.
+ type: array
+ items:
+ type: string
+ postalCodes:
+ description: Postal codes to be used on the Certificate.
+ type: array
+ items:
+ type: string
+ provinces:
+ description: State/Provinces to be used on the Certificate.
+ type: array
+ items:
+ type: string
+ serialNumber:
+ description: Serial number to be used on the Certificate.
+ type: string
+ streetAddresses:
+ description: Street addresses to be used on the Certificate.
+ type: array
+ items:
+ type: string
+ uris:
+ description: URIs is a list of URI subjectAltNames to be set on the Certificate.
+ type: array
+ items:
+ type: string
+ usages:
+ description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified.
+ type: array
+ items:
+ description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\""
+ type: string
+ enum:
+ - signing
+ - digital signature
+ - content commitment
+ - key encipherment
+ - key agreement
+ - data encipherment
+ - cert sign
+ - crl sign
+ - encipher only
+ - decipher only
+ - any
+ - server auth
+ - client auth
+ - code signing
+ - email protection
+ - s/mime
+ - ipsec end system
+ - ipsec tunnel
+ - ipsec user
+ - timestamping
+ - ocsp signing
+ - microsoft sgc
+ - netscape sgc
+ status:
+ description: Status of the Certificate. This is set and managed automatically.
+ type: object
+ properties:
+ conditions:
+ description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`.
+ type: array
+ items:
+ description: CertificateCondition contains condition information for a Certificate.
+ type: object
+ required:
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: LastTransitionTime is the timestamp corresponding to the last status change of this condition.
+ type: string
+ format: date-time
+ message:
+ description: Message is a human readable description of the details of the last transition, complementing reason.
+ type: string
+ observedGeneration:
+ description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate.
+ type: integer
+ format: int64
+ reason:
+ description: Reason is a brief machine readable explanation for the condition's last transition.
+ type: string
+ status:
+ description: Status of the condition, one of (`True`, `False`, `Unknown`).
+ type: string
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type:
+ description: Type of the condition, known values are (`Ready`, `Issuing`).
+ type: string
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ failedIssuanceAttempts:
+ description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1).
+ type: integer
+ lastFailureTime:
+ description: LastFailureTime is set only if the latest issuance for this Certificate failed and contains the time of the failure. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). If the latest issuance has succeeded this field will be unset.
+ type: string
+ format: date-time
+ nextPrivateKeySecretName:
+ description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False.
+ type: string
+ notAfter:
+ description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`.
+ type: string
+ format: date-time
+ notBefore:
+ description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid.
+ type: string
+ format: date-time
+ renewalTime:
+ description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled.
+ type: string
+ format: date-time
+ revision:
+ description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field."
+ type: integer
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: challenges.acme.cert-manager.io
+ labels:
+ app: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/name: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/instance: '{{ .Release.Name }}'
+ # Generated labels {{- include "labels" . | nindent 4 }}
+spec:
+ group: acme.cert-manager.io
+ names:
+ kind: Challenge
+ listKind: ChallengeList
+ plural: challenges
+ singular: challenge
+ categories:
+ - cert-manager
+ - cert-manager-acme
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .spec.dnsName
+ name: Domain
+ type: string
+ - jsonPath: .status.reason
+ name: Reason
+ priority: 1
+ type: string
+ - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Challenge is a type to represent a Challenge request with an ACME server
+ type: object
+ required:
+ - metadata
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ type: object
+ required:
+ - authorizationURL
+ - dnsName
+ - issuerRef
+ - key
+ - solver
+ - token
+ - type
+ - url
+ properties:
+ authorizationURL:
+ description: The URL to the ACME Authorization resource that this challenge is a part of.
+ type: string
+ dnsName:
+ description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`.
+ type: string
+ issuerRef:
+ description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed.
+ type: object
+ required:
+ - name
+ properties:
+ group:
+ description: Group of the resource being referred to.
+ type: string
+ kind:
+ description: Kind of the resource being referred to.
+ type: string
+ name:
+ description: Name of the resource being referred to.
+ type: string
+ key:
+ description: 'The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `<private key JWK thumbprint>.<key from acme server for challenge>`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `<private key JWK thumbprint>.<key from acme server for challenge>` text that must be set as the TXT record content.'
+ type: string
+ solver:
+ description: Contains the domain solving configuration that should be used to solve this challenge resource.
+ type: object
+ properties:
+ dns01:
+ description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow.
+ type: object
+ properties:
+ acmeDNS:
+ description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records.
+ type: object
+ required:
+ - accountSecretRef
+ - host
+ properties:
+ accountSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ host:
+ type: string
+ akamai:
+ description: Use the Akamai DNS zone management API to manage DNS01 challenge records.
+ type: object
+ required:
+ - accessTokenSecretRef
+ - clientSecretSecretRef
+ - clientTokenSecretRef
+ - serviceConsumerDomain
+ properties:
+ accessTokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ clientSecretSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ clientTokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ serviceConsumerDomain:
+ type: string
+ azureDNS:
+ description: Use the Microsoft Azure DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - resourceGroupName
+ - subscriptionID
+ properties:
+ clientID:
+ description: if both this and ClientSecret are left unset MSI will be used
+ type: string
+ clientSecretSecretRef:
+ description: if both this and ClientID are left unset MSI will be used
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ environment:
+ description: name of the Azure environment (default AzurePublicCloud)
+ type: string
+ enum:
+ - AzurePublicCloud
+ - AzureChinaCloud
+ - AzureGermanCloud
+ - AzureUSGovernmentCloud
+ hostedZoneName:
+ description: name of the DNS zone that should be used
+ type: string
+ managedIdentity:
+ description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID
+ type: object
+ properties:
+ clientID:
+ description: client ID of the managed identity, can not be used at the same time as resourceID
+ type: string
+ resourceID:
+ description: resource ID of the managed identity, can not be used at the same time as clientID
+ type: string
+ resourceGroupName:
+ description: resource group the DNS zone is located in
+ type: string
+ subscriptionID:
+ description: ID of the Azure subscription
+ type: string
+ tenantID:
+ description: when specifying ClientID and ClientSecret then this field is also needed
+ type: string
+ cloudDNS:
+ description: Use the Google Cloud DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - project
+ properties:
+ hostedZoneName:
+ description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone.
+ type: string
+ project:
+ type: string
+ serviceAccountSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ cloudflare:
+ description: Use the Cloudflare API to manage DNS01 challenge records.
+ type: object
+ properties:
+ apiKeySecretRef:
+ description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ apiTokenSecretRef:
+ description: API token used to authenticate with Cloudflare.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ email:
+ description: Email of the account, only required when using API key based authentication.
+ type: string
+ cnameStrategy:
+ description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones.
+ type: string
+ enum:
+ - None
+ - Follow
+ digitalocean:
+ description: Use the DigitalOcean DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - tokenSecretRef
+ properties:
+ tokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ rfc2136:
+ description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records.
+ type: object
+ required:
+ - nameserver
+ properties:
+ nameserver:
+ description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required.
+ type: string
+ tsigAlgorithm:
+ description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.'
+ type: string
+ tsigKeyName:
+ description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required.
+ type: string
+ tsigSecretSecretRef:
+ description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ route53:
+ description: Use the AWS Route53 API to manage DNS01 challenge records.
+ type: object
+ required:
+ - region
+ properties:
+ accessKeyID:
+ description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: string
+ accessKeyIDSecretRef:
+ description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ hostedZoneID:
+ description: If set, the provider will manage only this zone in Route53 and will not do a lookup using the route53:ListHostedZonesByName api call.
+ type: string
+ region:
+ description: Always set the region when using AccessKeyID and SecretAccessKey
+ type: string
+ role:
+ description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata
+ type: string
+ secretAccessKeySecretRef:
+ description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ webhook:
+ description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records.
+ type: object
+ required:
+ - groupName
+ - solverName
+ properties:
+ config:
+ description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation.
+ x-kubernetes-preserve-unknown-fields: true
+ groupName:
+ description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation.
+ type: string
+ solverName:
+ description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'.
+ type: string
+ http01:
+ description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism.
+ type: object
+ properties:
+ gatewayHTTPRoute:
+ description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future.
+ type: object
+ properties:
+ labels:
+ description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges.
+ type: object
+ additionalProperties:
+ type: string
+ parentRefs:
+ description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways'
+ type: array
+ items:
+ description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid."
+ type: object
+ required:
+ - name
+ properties:
+ group:
+ description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
+ type: string
+ default: gateway.networking.k8s.io
+ maxLength: 253
+ pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ kind:
+ description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)"
+ type: string
+ default: Gateway
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
+ name:
+ description: "Name is the name of the referent. \n Support: Core"
+ type: string
+ maxLength: 253
+ minLength: 1
+ namespace:
+ description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core"
+ type: string
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ port:
+ description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n <gateway:experimental>"
+ type: integer
+ format: int32
+ maximum: 65535
+ minimum: 1
+ sectionName:
+ description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
+ type: string
+ maxLength: 253
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ serviceType:
+ description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort.
+ type: string
+ ingress:
+ description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed.
+ type: object
+ properties:
+ class:
+ description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ ingressClassName:
+ description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ ingressTemplate:
+ description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges.
+ type: object
+ properties:
+ metadata:
+ description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values.
+ type: object
+ properties:
+ annotations:
+ description: Annotations that should be added to the created ACME HTTP01 solver ingress.
+ type: object
+ additionalProperties:
+ type: string
+ labels:
+ description: Labels that should be added to the created ACME HTTP01 solver ingress.
+ type: object
+ additionalProperties:
+ type: string
+ name:
+ description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ podTemplate:
+ description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges.
+ type: object
+ properties:
+ metadata:
+ description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values.
+ type: object
+ properties:
+ annotations:
+ description: Annotations that should be added to the created ACME HTTP01 solver pods.
+ type: object
+ additionalProperties:
+ type: string
+ labels:
+ description: Labels that should be added to the created ACME HTTP01 solver pods.
+ type: object
+ additionalProperties:
+ type: string
+ spec:
+ description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored.
+ type: object
+ properties:
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ type: object
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ type: object
+ required:
+ - preference
+ - weight
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ type: object
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+                                                    description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+                                                    description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ type: object
+ required:
+ - nodeSelectorTerms
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ type: array
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ type: object
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+                                                    description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+                                                    description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ x-kubernetes-map-type: atomic
+ x-kubernetes-map-type: atomic
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ type: object
+ required:
+ - podAffinityTerm
+ - weight
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ type: array
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ type: object
+ required:
+ - podAffinityTerm
+ - weight
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ type: array
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ imagePullSecrets:
+ description: If specified, the pod's imagePullSecrets
+ type: array
+ items:
+ description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
+ type: object
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ x-kubernetes-map-type: atomic
+ nodeSelector:
+ description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/'
+ type: object
+ additionalProperties:
+ type: string
+ priorityClassName:
+ description: If specified, the pod's priorityClassName.
+ type: string
+ serviceAccountName:
+ description: If specified, the pod's service account
+ type: string
+ tolerations:
+ description: If specified, the pod's tolerations.
+ type: array
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ type: object
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ type: integer
+ format: int64
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ serviceType:
+ description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort.
+ type: string
+ selector:
+ description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead.
+ type: object
+ properties:
+ dnsNames:
+ description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected.
+ type: array
+ items:
+ type: string
+ dnsZones:
+ description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to.
+ type: object
+ additionalProperties:
+ type: string
+ token:
+ description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server.
+ type: string
+ type:
+ description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01".
+ type: string
+ enum:
+ - HTTP-01
+ - DNS-01
+ url:
+ description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge.
+ type: string
+ wildcard:
+ description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'.
+ type: boolean
+ status:
+ type: object
+ properties:
+ presented:
+ description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured).
+ type: boolean
+ processing:
+ description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action.
+ type: boolean
+ reason:
+ description: Contains human readable information on why the Challenge is in the current state.
+ type: string
+ state:
+ description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown.
+ type: string
+ enum:
+ - valid
+ - ready
+ - pending
+ - processing
+ - invalid
+ - expired
+ - errored
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterissuers.cert-manager.io
+ labels:
+ app: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/name: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/instance: "{{ .Release.Name }}"
+ # Generated labels {{- include "labels" . | nindent 4 }}
+spec:
+ group: cert-manager.io
+ names:
+ kind: ClusterIssuer
+ listKind: ClusterIssuerList
+ plural: clusterissuers
+ singular: clusterissuer
+ categories:
+ - cert-manager
+ scope: Cluster
+ versions:
+ - name: v1
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Status
+ priority: 1
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ name: Age
+ type: date
+ schema:
+ openAPIV3Schema:
+ description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Desired state of the ClusterIssuer resource.
+ type: object
+ properties:
+ acme:
+ description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates.
+ type: object
+ required:
+ - privateKeySecretRef
+ - server
+ properties:
+ caBundle:
+ description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection.
+ type: string
+ format: byte
+ disableAccountKeyGeneration:
+ description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false.
+ type: boolean
+ email:
+ description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered.
+ type: string
+ enableDurationFeature:
+ description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false.
+ type: boolean
+ externalAccountBinding:
+ description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account.
+ type: object
+ required:
+ - keyID
+ - keySecretRef
+ properties:
+ keyAlgorithm:
+ description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.'
+ type: string
+ enum:
+ - HS256
+ - HS384
+ - HS512
+ keyID:
+ description: keyID is the ID of the CA key that the External Account is bound to.
+ type: string
+ keySecretRef:
+ description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ preferredChain:
+ description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN'
+ type: string
+ maxLength: 64
+ privateKeySecretRef:
+ description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ server:
+ description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.'
+ type: string
+ skipTLSVerify:
+ description: 'INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false.'
+ type: boolean
+ solvers:
+ description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/'
+ type: array
+ items:
+ description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided.
+ type: object
+ properties:
+ dns01:
+ description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow.
+ type: object
+ properties:
+ acmeDNS:
+ description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records.
+ type: object
+ required:
+ - accountSecretRef
+ - host
+ properties:
+ accountSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ host:
+ type: string
+ akamai:
+ description: Use the Akamai DNS zone management API to manage DNS01 challenge records.
+ type: object
+ required:
+ - accessTokenSecretRef
+ - clientSecretSecretRef
+ - clientTokenSecretRef
+ - serviceConsumerDomain
+ properties:
+ accessTokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ clientSecretSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ clientTokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ serviceConsumerDomain:
+ type: string
+ azureDNS:
+ description: Use the Microsoft Azure DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - resourceGroupName
+ - subscriptionID
+ properties:
+ clientID:
+ description: if both this and ClientSecret are left unset MSI will be used
+ type: string
+ clientSecretSecretRef:
+ description: if both this and ClientID are left unset MSI will be used
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ environment:
+ description: name of the Azure environment (default AzurePublicCloud)
+ type: string
+ enum:
+ - AzurePublicCloud
+ - AzureChinaCloud
+ - AzureGermanCloud
+ - AzureUSGovernmentCloud
+ hostedZoneName:
+ description: name of the DNS zone that should be used
+ type: string
+ managedIdentity:
+ description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID
+ type: object
+ properties:
+ clientID:
+ description: client ID of the managed identity, can not be used at the same time as resourceID
+ type: string
+ resourceID:
+ description: resource ID of the managed identity, can not be used at the same time as clientID
+ type: string
+ resourceGroupName:
+ description: resource group the DNS zone is located in
+ type: string
+ subscriptionID:
+ description: ID of the Azure subscription
+ type: string
+ tenantID:
+ description: when specifying ClientID and ClientSecret then this field is also needed
+ type: string
+ cloudDNS:
+ description: Use the Google Cloud DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - project
+ properties:
+ hostedZoneName:
+ description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone.
+ type: string
+ project:
+ type: string
+ serviceAccountSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ cloudflare:
+ description: Use the Cloudflare API to manage DNS01 challenge records.
+ type: object
+ properties:
+ apiKeySecretRef:
+ description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ apiTokenSecretRef:
+ description: API token used to authenticate with Cloudflare.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ email:
+ description: Email of the account, only required when using API key based authentication.
+ type: string
+ cnameStrategy:
+ description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones.
+ type: string
+ enum:
+ - None
+ - Follow
+ digitalocean:
+ description: Use the DigitalOcean DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - tokenSecretRef
+ properties:
+ tokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ rfc2136:
+ description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records.
+ type: object
+ required:
+ - nameserver
+ properties:
+ nameserver:
+ description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required.
+ type: string
+ tsigAlgorithm:
+ description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.'
+ type: string
+ tsigKeyName:
+ description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required.
+ type: string
+ tsigSecretSecretRef:
+ description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ route53:
+ description: Use the AWS Route53 API to manage DNS01 challenge records.
+ type: object
+ required:
+ - region
+ properties:
+ accessKeyID:
+ description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: string
+ accessKeyIDSecretRef:
+ description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ hostedZoneID:
+ description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call.
+ type: string
+ region:
+ description: Always set the region when using AccessKeyID and SecretAccessKey
+ type: string
+ role:
+ description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata
+ type: string
+ secretAccessKeySecretRef:
+ description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ webhook:
+ description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records.
+ type: object
+ required:
+ - groupName
+ - solverName
+ properties:
+ config:
+ description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation.
+ x-kubernetes-preserve-unknown-fields: true
+ groupName:
+ description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation.
+ type: string
+ solverName:
+ description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'.
+ type: string
+ http01:
+ description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism.
+ type: object
+ properties:
+ gatewayHTTPRoute:
+ description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future.
+ type: object
+ properties:
+ labels:
+ description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges.
+ type: object
+ additionalProperties:
+ type: string
+ parentRefs:
+ description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways'
+ type: array
+ items:
+ description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid."
+ type: object
+ required:
+ - name
+ properties:
+ group:
+ description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
+ type: string
+ default: gateway.networking.k8s.io
+ maxLength: 253
+ pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ kind:
+ description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)"
+ type: string
+ default: Gateway
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
+ name:
+ description: "Name is the name of the referent. \n Support: Core"
+ type: string
+ maxLength: 253
+ minLength: 1
+ namespace:
+ description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core"
+ type: string
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ port:
+ description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n <gateway:experimental>"
+ type: integer
+ format: int32
+ maximum: 65535
+ minimum: 1
+ sectionName:
+ description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
+ type: string
+ maxLength: 253
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ serviceType:
+ description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort.
+ type: string
+ ingress:
+ description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed.
+ type: object
+ properties:
+ class:
+ description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ ingressClassName:
+ description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ ingressTemplate:
+ description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges.
+ type: object
+ properties:
+ metadata:
+ description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values.
+ type: object
+ properties:
+ annotations:
+ description: Annotations that should be added to the created ACME HTTP01 solver ingress.
+ type: object
+ additionalProperties:
+ type: string
+ labels:
+ description: Labels that should be added to the created ACME HTTP01 solver ingress.
+ type: object
+ additionalProperties:
+ type: string
+ name:
+ description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ podTemplate:
+ description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges.
+ type: object
+ properties:
+ metadata:
+ description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values.
+ type: object
+ properties:
+ annotations:
+ description: Annotations that should be added to the create ACME HTTP01 solver pods.
+ type: object
+ additionalProperties:
+ type: string
+ labels:
+ description: Labels that should be added to the created ACME HTTP01 solver pods.
+ type: object
+ additionalProperties:
+ type: string
+ spec:
+ description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored.
+ type: object
+ properties:
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ type: object
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ type: object
+ required:
+ - preference
+ - weight
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ type: object
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ type: object
+ required:
+ - nodeSelectorTerms
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ type: array
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ type: object
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+                                          description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+                                          description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ x-kubernetes-map-type: atomic
+ x-kubernetes-map-type: atomic
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ type: object
+ required:
+ - podAffinityTerm
+ - weight
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ type: array
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ type: object
+ required:
+ - podAffinityTerm
+ - weight
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ type: array
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ imagePullSecrets:
+ description: If specified, the pod's imagePullSecrets
+ type: array
+ items:
+ description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
+ type: object
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ x-kubernetes-map-type: atomic
+ nodeSelector:
+ description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/'
+ type: object
+ additionalProperties:
+ type: string
+ priorityClassName:
+ description: If specified, the pod's priorityClassName.
+ type: string
+ serviceAccountName:
+ description: If specified, the pod's service account
+ type: string
+ tolerations:
+ description: If specified, the pod's tolerations.
+ type: array
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ type: object
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ type: integer
+ format: int64
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ serviceType:
+ description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort.
+ type: string
+ selector:
+ description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead.
+ type: object
+ properties:
+ dnsNames:
+ description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected.
+ type: array
+ items:
+ type: string
+ dnsZones:
+ description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to.
+ type: object
+ additionalProperties:
+ type: string
+ ca:
+ description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager.
+ type: object
+ required:
+ - secretName
+ properties:
+ crlDistributionPoints:
+ description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set.
+ type: array
+ items:
+ type: string
+ ocspServers:
+ description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org".
+ type: array
+ items:
+ type: string
+ secretName:
+ description: SecretName is the name of the secret used to sign Certificates issued by this Issuer.
+ type: string
+ selfSigned:
+ description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object.
+ type: object
+ properties:
+ crlDistributionPoints:
+ description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings.
+ type: array
+ items:
+ type: string
+ vault:
+ description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend.
+ type: object
+ required:
+ - auth
+ - path
+ - server
+ properties:
+ auth:
+ description: Auth configures how cert-manager authenticates with the Vault server.
+ type: object
+ properties:
+ appRole:
+ description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource.
+ type: object
+ required:
+ - path
+ - roleId
+ - secretRef
+ properties:
+ path:
+ description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"'
+ type: string
+ roleId:
+ description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault.
+ type: string
+ secretRef:
+ description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ kubernetes:
+ description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server.
+ type: object
+ required:
+ - role
+ properties:
+ mountPath:
+ description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used.
+ type: string
+ role:
+ description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies.
+ type: string
+ secretRef:
+ description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ serviceAccountRef:
+ description: A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. To use this field, you must configure an RBAC rule to let cert-manager request a token.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: Name of the ServiceAccount used to request a token.
+ type: string
+ tokenSecretRef:
+ description: TokenSecretRef authenticates with Vault by presenting a token.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ caBundle:
+ description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection.
+ type: string
+ format: byte
+ caBundleSecretRef:
+ description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces'
+ type: string
+ path:
+ description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".'
+ type: string
+ server:
+ description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".'
+ type: string
+ venafi:
+ description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone.
+ type: object
+ required:
+ - zone
+ properties:
+ cloud:
+ description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified.
+ type: object
+ required:
+ - apiTokenSecretRef
+ properties:
+ apiTokenSecretRef:
+ description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ url:
+ description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1".
+ type: string
+ tpp:
+ description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified.
+ type: object
+ required:
+ - credentialsRef
+ - url
+ properties:
+ caBundle:
+ description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain.
+ type: string
+ format: byte
+ credentialsRef:
+ description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ url:
+ description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".'
+ type: string
+ zone:
+ description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required.
+ type: string
+ status:
+ description: Status of the ClusterIssuer. This is set and managed automatically.
+ type: object
+ properties:
+ acme:
+ description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates.
+ type: object
+ properties:
+ lastPrivateKeyHash:
+ description: LastPrivateKeyHash is a hash of the private key associated with the latest registered ACME account, in order to track changes made to the registered account associated with the Issuer
+ type: string
+ lastRegisteredEmail:
+ description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to the registered account associated with the Issuer
+ type: string
+ uri:
+ description: URI is the unique account identifier, which can also be used to retrieve account details from the CA
+ type: string
+ conditions:
+ description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`.
+ type: array
+ items:
+ description: IssuerCondition contains condition information for an Issuer.
+ type: object
+ required:
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: LastTransitionTime is the timestamp corresponding to the last status change of this condition.
+ type: string
+ format: date-time
+ message:
+ description: Message is a human readable description of the details of the last transition, complementing reason.
+ type: string
+ observedGeneration:
+ description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer.
+ type: integer
+ format: int64
+ reason:
+ description: Reason is a brief machine readable explanation for the condition's last transition.
+ type: string
+ status:
+ description: Status of the condition, one of (`True`, `False`, `Unknown`).
+ type: string
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type:
+ description: Type of the condition, known values are (`Ready`).
+ type: string
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: issuers.cert-manager.io
+ labels:
+ app: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/name: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/instance: "{{ .Release.Name }}"
+ # Generated labels {{- include "labels" . | nindent 4 }}
+spec:
+ group: cert-manager.io
+ names:
+ kind: Issuer
+ listKind: IssuerList
+ plural: issuers
+ singular: issuer
+ categories:
+ - cert-manager
+ scope: Namespaced
+ versions:
+ - name: v1
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Status
+ priority: 1
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ name: Age
+ type: date
+ schema:
+ openAPIV3Schema:
+ description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Desired state of the Issuer resource.
+ type: object
+ properties:
+ acme:
+ description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates.
+ type: object
+ required:
+ - privateKeySecretRef
+ - server
+ properties:
+ caBundle:
+ description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection.
+ type: string
+ format: byte
+ disableAccountKeyGeneration:
+ description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false.
+ type: boolean
+ email:
+ description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered.
+ type: string
+ enableDurationFeature:
+ description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it, it will create an error on the Order. Defaults to false.
+ type: boolean
+ externalAccountBinding:
+ description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account.
+ type: object
+ required:
+ - keyID
+ - keySecretRef
+ properties:
+ keyAlgorithm:
+ description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.'
+ type: string
+ enum:
+ - HS256
+ - HS384
+ - HS512
+ keyID:
+ description: keyID is the ID of the CA key that the External Account is bound to.
+ type: string
+ keySecretRef:
+ description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ preferredChain:
+ description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN'
+ type: string
+ maxLength: 64
+ privateKeySecretRef:
+ description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ server:
+ description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.'
+ type: string
+ skipTLSVerify:
+ description: 'INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false.'
+ type: boolean
+ solvers:
+ description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/'
+ type: array
+ items:
+ description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided.
+ type: object
+ properties:
+ dns01:
+ description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow.
+ type: object
+ properties:
+ acmeDNS:
+ description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records.
+ type: object
+ required:
+ - accountSecretRef
+ - host
+ properties:
+ accountSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ host:
+ type: string
+ akamai:
+ description: Use the Akamai DNS zone management API to manage DNS01 challenge records.
+ type: object
+ required:
+ - accessTokenSecretRef
+ - clientSecretSecretRef
+ - clientTokenSecretRef
+ - serviceConsumerDomain
+ properties:
+ accessTokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ clientSecretSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ clientTokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ serviceConsumerDomain:
+ type: string
+ azureDNS:
+ description: Use the Microsoft Azure DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - resourceGroupName
+ - subscriptionID
+ properties:
+ clientID:
+ description: if both this and ClientSecret are left unset MSI will be used
+ type: string
+ clientSecretSecretRef:
+ description: if both this and ClientID are left unset MSI will be used
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ environment:
+ description: name of the Azure environment (default AzurePublicCloud)
+ type: string
+ enum:
+ - AzurePublicCloud
+ - AzureChinaCloud
+ - AzureGermanCloud
+ - AzureUSGovernmentCloud
+ hostedZoneName:
+ description: name of the DNS zone that should be used
+ type: string
+ managedIdentity:
+ description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID
+ type: object
+ properties:
+ clientID:
+ description: client ID of the managed identity, can not be used at the same time as resourceID
+ type: string
+ resourceID:
+ description: resource ID of the managed identity, can not be used at the same time as clientID
+ type: string
+ resourceGroupName:
+ description: resource group the DNS zone is located in
+ type: string
+ subscriptionID:
+ description: ID of the Azure subscription
+ type: string
+ tenantID:
+ description: when specifying ClientID and ClientSecret then this field is also needed
+ type: string
+ cloudDNS:
+ description: Use the Google Cloud DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - project
+ properties:
+ hostedZoneName:
+ description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone.
+ type: string
+ project:
+ type: string
+ serviceAccountSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ cloudflare:
+ description: Use the Cloudflare API to manage DNS01 challenge records.
+ type: object
+ properties:
+ apiKeySecretRef:
+ description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ apiTokenSecretRef:
+ description: API token used to authenticate with Cloudflare.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ email:
+ description: Email of the account, only required when using API key based authentication.
+ type: string
+ cnameStrategy:
+ description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones.
+ type: string
+ enum:
+ - None
+ - Follow
+ digitalocean:
+ description: Use the DigitalOcean DNS API to manage DNS01 challenge records.
+ type: object
+ required:
+ - tokenSecretRef
+ properties:
+ tokenSecretRef:
+ description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ rfc2136:
+ description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records.
+ type: object
+ required:
+ - nameserver
+ properties:
+ nameserver:
+ description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required.
+ type: string
+ tsigAlgorithm:
+ description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.'
+ type: string
+ tsigKeyName:
+ description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required.
+ type: string
+ tsigSecretSecretRef:
+ description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ route53:
+ description: Use the AWS Route53 API to manage DNS01 challenge records.
+ type: object
+ required:
+ - region
+ properties:
+ accessKeyID:
+ description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: string
+ accessKeyIDSecretRef:
+ description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ hostedZoneID:
+ description: If set, the provider will manage only this zone in Route53 and will not do a lookup using the route53:ListHostedZonesByName api call.
+ type: string
+ region:
+ description: Always set the region when using AccessKeyID and SecretAccessKey
+ type: string
+ role:
+ description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata
+ type: string
+ secretAccessKeySecretRef:
+ description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials'
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ webhook:
+ description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records.
+ type: object
+ required:
+ - groupName
+ - solverName
+ properties:
+ config:
+ description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation.
+ x-kubernetes-preserve-unknown-fields: true
+ groupName:
+ description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation.
+ type: string
+ solverName:
+ description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'.
+ type: string
+ http01:
+ description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism.
+ type: object
+ properties:
+ gatewayHTTPRoute:
+ description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future.
+ type: object
+ properties:
+ labels:
+ description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges.
+ type: object
+ additionalProperties:
+ type: string
+ parentRefs:
+ description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways'
+ type: array
+ items:
+ description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid."
+ type: object
+ required:
+ - name
+ properties:
+ group:
+ description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
+ type: string
+ default: gateway.networking.k8s.io
+ maxLength: 253
+ pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ kind:
+ description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)"
+ type: string
+ default: Gateway
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
+ name:
+ description: "Name is the name of the referent. \n Support: Core"
+ type: string
+ maxLength: 253
+ minLength: 1
+ namespace:
+ description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core"
+ type: string
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ port:
+ description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n <gateway:experimental>"
+ type: integer
+ format: int32
+ maximum: 65535
+ minimum: 1
+ sectionName:
+ description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
+ type: string
+ maxLength: 253
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ serviceType:
+ description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort.
+ type: string
+ ingress:
+ description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed.
+ type: object
+ properties:
+ class:
+ description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ ingressClassName:
+ description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ ingressTemplate:
+ description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges.
+ type: object
+ properties:
+ metadata:
+ description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values.
+ type: object
+ properties:
+ annotations:
+ description: Annotations that should be added to the created ACME HTTP01 solver ingress.
+ type: object
+ additionalProperties:
+ type: string
+ labels:
+ description: Labels that should be added to the created ACME HTTP01 solver ingress.
+ type: object
+ additionalProperties:
+ type: string
+ name:
+ description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified.
+ type: string
+ podTemplate:
+ description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges.
+ type: object
+ properties:
+ metadata:
+ description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values.
+ type: object
+ properties:
+ annotations:
+ description: Annotations that should be added to the created ACME HTTP01 solver pods.
+ type: object
+ additionalProperties:
+ type: string
+ labels:
+ description: Labels that should be added to the created ACME HTTP01 solver pods.
+ type: object
+ additionalProperties:
+ type: string
+ spec:
+ description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored.
+ type: object
+ properties:
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ type: object
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ type: object
+ required:
+ - preference
+ - weight
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ type: object
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ type: object
+ required:
+ - nodeSelectorTerms
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ type: array
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ type: object
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ type: array
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ x-kubernetes-map-type: atomic
+ x-kubernetes-map-type: atomic
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ type: object
+ required:
+ - podAffinityTerm
+ - weight
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ type: array
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ type: array
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ type: object
+ required:
+ - podAffinityTerm
+ - weight
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ type: integer
+ format: int32
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ type: array
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ type: object
+ required:
+ - topologyKey
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ type: array
+ items:
+ type: string
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ imagePullSecrets:
+ description: If specified, the pod's imagePullSecrets
+ type: array
+ items:
+ description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
+ type: object
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ x-kubernetes-map-type: atomic
+ nodeSelector:
+ description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/'
+ type: object
+ additionalProperties:
+ type: string
+ priorityClassName:
+ description: If specified, the pod's priorityClassName.
+ type: string
+ serviceAccountName:
+ description: If specified, the pod's service account
+ type: string
+ tolerations:
+ description: If specified, the pod's tolerations.
+ type: array
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ type: object
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ type: integer
+ format: int64
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ serviceType:
+ description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort.
+ type: string
+ selector:
+ description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead.
+ type: object
+ properties:
+ dnsNames:
+ description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected.
+ type: array
+ items:
+ type: string
+ dnsZones:
+ description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to.
+ type: object
+ additionalProperties:
+ type: string
+ ca:
+ description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager.
+ type: object
+ required:
+ - secretName
+ properties:
+ crlDistributionPoints:
+ description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set.
+ type: array
+ items:
+ type: string
+ ocspServers:
+ description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org".
+ type: array
+ items:
+ type: string
+ secretName:
+ description: SecretName is the name of the secret used to sign Certificates issued by this Issuer.
+ type: string
+ selfSigned:
+ description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object.
+ type: object
+ properties:
+ crlDistributionPoints:
+ description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, the certificate will be issued without CDP. Values are strings.
+ type: array
+ items:
+ type: string
+ vault:
+ description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend.
+ type: object
+ required:
+ - auth
+ - path
+ - server
+ properties:
+ auth:
+ description: Auth configures how cert-manager authenticates with the Vault server.
+ type: object
+ properties:
+ appRole:
+ description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource.
+ type: object
+ required:
+ - path
+ - roleId
+ - secretRef
+ properties:
+ path:
+ description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"'
+ type: string
+ roleId:
+ description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault.
+ type: string
+ secretRef:
+ description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ kubernetes:
+ description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server.
+ type: object
+ required:
+ - role
+ properties:
+ mountPath:
+ description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used.
+ type: string
+ role:
+ description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies.
+ type: string
+ secretRef:
+ description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ serviceAccountRef:
+ description: A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. To use this field, you must configure an RBAC rule to let cert-manager request a token.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: Name of the ServiceAccount used to request a token.
+ type: string
+ tokenSecretRef:
+ description: TokenSecretRef authenticates with Vault by presenting a token.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ caBundle:
+ description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection.
+ type: string
+ format: byte
+ caBundleSecretRef:
+ description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces'
+ type: string
+ path:
+ description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".'
+ type: string
+ server:
+ description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".'
+ type: string
+ venafi:
+ description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone.
+ type: object
+ required:
+ - zone
+ properties:
+ cloud:
+ description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified.
+ type: object
+ required:
+ - apiTokenSecretRef
+ properties:
+ apiTokenSecretRef:
+ description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token.
+ type: object
+ required:
+ - name
+ properties:
+ key:
+ description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
+ type: string
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ url:
+ description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1".
+ type: string
+ tpp:
+ description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified.
+ type: object
+ required:
+ - credentialsRef
+ - url
+ properties:
+ caBundle:
+ description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain.
+ type: string
+ format: byte
+ credentialsRef:
+ description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ url:
+ description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".'
+ type: string
+ zone:
+ description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required.
+ type: string
+ status:
+ description: Status of the Issuer. This is set and managed automatically.
+ type: object
+ properties:
+ acme:
+ description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates.
+ type: object
+ properties:
+ lastPrivateKeyHash:
+ description: LastPrivateKeyHash is a hash of the private key associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer
+ type: string
+ lastRegisteredEmail:
+ description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer
+ type: string
+ uri:
+ description: URI is the unique account identifier, which can also be used to retrieve account details from the CA
+ type: string
+ conditions:
+ description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`.
+ type: array
+ items:
+ description: IssuerCondition contains condition information for an Issuer.
+ type: object
+ required:
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: LastTransitionTime is the timestamp corresponding to the last status change of this condition.
+ type: string
+ format: date-time
+ message:
+ description: Message is a human readable description of the details of the last transition, complementing reason.
+ type: string
+ observedGeneration:
+ description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer.
+ type: integer
+ format: int64
+ reason:
+ description: Reason is a brief machine readable explanation for the condition's last transition.
+ type: string
+ status:
+ description: Status of the condition, one of (`True`, `False`, `Unknown`).
+ type: string
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type:
+ description: Type of the condition, known values are (`Ready`).
+ type: string
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: orders.acme.cert-manager.io
+ labels:
+ app: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/name: '{{ template "cert-manager.name" . }}'
+ app.kubernetes.io/instance: '{{ .Release.Name }}'
+ # Generated labels {{- include "labels" . | nindent 4 }}
+spec:
+ group: acme.cert-manager.io
+ names:
+ kind: Order
+ listKind: OrderList
+ plural: orders
+ singular: order
+ categories:
+ - cert-manager
+ - cert-manager-acme
+ scope: Namespaced
+ versions:
+ - name: v1
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .spec.issuerRef.name
+ name: Issuer
+ priority: 1
+ type: string
+ - jsonPath: .status.reason
+ name: Reason
+ priority: 1
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ name: Age
+ type: date
+ schema:
+ openAPIV3Schema:
+ description: Order is a type to represent an Order with an ACME server
+ type: object
+ required:
+ - metadata
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ type: object
+ required:
+ - issuerRef
+ - request
+ properties:
+ commonName:
+ description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR.
+ type: string
+ dnsNames:
+ description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR.
+ type: array
+ items:
+ type: string
+ duration:
+ description: Duration is the duration for the not after date for the requested certificate. This is set on order creation as per the ACME spec.
+ type: string
+ ipAddresses:
+ description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR.
+ type: array
+ items:
+ type: string
+ issuerRef:
+ description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed.
+ type: object
+ required:
+ - name
+ properties:
+ group:
+ description: Group of the resource being referred to.
+ type: string
+ kind:
+ description: Kind of the resource being referred to.
+ type: string
+ name:
+ description: Name of the resource being referred to.
+ type: string
+ request:
+ description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order.
+ type: string
+ format: byte
+ status:
+ type: object
+ properties:
+ authorizations:
+ description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order.
+ type: array
+ items:
+ description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order to validate a DNS name on an ACME Order resource.
+ type: object
+ required:
+ - url
+ properties:
+ challenges:
+ description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process.
+ type: array
+ items:
+ description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process.
+ type: object
+ required:
+ - token
+ - type
+ - url
+ properties:
+ token:
+ description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented.
+ type: string
+ type:
+ description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored.
+ type: string
+ url:
+ description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server.
+ type: string
+ identifier:
+ description: Identifier is the DNS name to be validated as part of this authorization
+ type: string
+ initialState:
+ description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created.
+ type: string
+ enum:
+ - valid
+ - ready
+ - pending
+ - processing
+ - invalid
+ - expired
+ - errored
+ url:
+ description: URL is the URL of the Authorization that must be completed
+ type: string
+ wildcard:
+ description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'.
+ type: boolean
+ certificate:
+ description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state.
+ type: string
+ format: byte
+ failureTime:
+ description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off.
+ type: string
+ format: date-time
+ finalizeURL:
+ description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed.
+ type: string
+ reason:
+ description: Reason optionally provides more information about why the order is in the current state.
+ type: string
+ state:
+ description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final'
+ type: string
+ enum:
+ - valid
+ - ready
+ - pending
+ - processing
+ - invalid
+ - expired
+ - errored
+ url:
+ description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set.
+ type: string
+ served: true
+ storage: true
+{{- end }}
diff --git a/charts/cert-manager/templates/deployment.yaml b/charts/cert-manager/templates/deployment.yaml
new file mode 100644
index 0000000..aea5736
--- /dev/null
+++ b/charts/cert-manager/templates/deployment.yaml
@@ -0,0 +1,204 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "cert-manager.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ template "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ template "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.deploymentAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ template "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- with .Values.strategy }}
+ strategy:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ template "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 8 }}
+ {{- with .Values.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.prometheus.enabled (not .Values.prometheus.servicemonitor.enabled) }}
+ {{- if not .Values.podAnnotations }}
+ annotations:
+ {{- end }}
+ prometheus.io/path: "/metrics"
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9402'
+ {{- end }}
+ spec:
+ serviceAccountName: {{ template "cert-manager.serviceAccountName" . }}
+ {{- if hasKey .Values "automountServiceAccountToken" }}
+ automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
+ {{- end }}
+ {{- with .Values.global.priorityClassName }}
+ priorityClassName: {{ . | quote }}
+ {{- end }}
+ {{- with .Values.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}-controller
+ {{- with .Values.image }}
+ image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args:
+ {{- if .Values.global.logLevel }}
+ - --v={{ .Values.global.logLevel }}
+ {{- end }}
+ {{- if .Values.clusterResourceNamespace }}
+ - --cluster-resource-namespace={{ .Values.clusterResourceNamespace }}
+ {{- else }}
+ - --cluster-resource-namespace=$(POD_NAMESPACE)
+ {{- end }}
+ {{- with .Values.global.leaderElection }}
+ - --leader-election-namespace={{ .namespace }}
+ {{- if .leaseDuration }}
+ - --leader-election-lease-duration={{ .leaseDuration }}
+ {{- end }}
+ {{- if .renewDeadline }}
+ - --leader-election-renew-deadline={{ .renewDeadline }}
+ {{- end }}
+ {{- if .retryPeriod }}
+ - --leader-election-retry-period={{ .retryPeriod }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.acmesolver.image }}
+ - --acme-http01-solver-image={{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}
+ {{- end }}
+ {{- with .Values.extraArgs }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.ingressShim }}
+ {{- if .defaultIssuerName }}
+ - --default-issuer-name={{ .defaultIssuerName }}
+ {{- end }}
+ {{- if .defaultIssuerKind }}
+ - --default-issuer-kind={{ .defaultIssuerKind }}
+ {{- end }}
+ {{- if .defaultIssuerGroup }}
+ - --default-issuer-group={{ .defaultIssuerGroup }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.featureGates }}
+ - --feature-gates={{ .Values.featureGates }}
+ {{- end }}
+ {{- if .Values.maxConcurrentChallenges }}
+ - --max-concurrent-challenges={{ .Values.maxConcurrentChallenges }}
+ {{- end }}
+ {{- if .Values.enableCertificateOwnerRef }}
+ - --enable-certificate-owner-ref=true
+ {{- end }}
+ {{- if .Values.dns01RecursiveNameserversOnly }}
+ - --dns01-recursive-nameservers-only=true
+ {{- end }}
+ {{- with .Values.dns01RecursiveNameservers }}
+ - --dns01-recursive-nameservers={{ . }}
+ {{- end }}
+ ports:
+ - containerPort: 9402
+ name: http-metrics
+ protocol: TCP
+ - containerPort: 9403
+ name: http-healthz
+ protocol: TCP
+ {{- with .Values.containerSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.volumeMounts }}
+ volumeMounts:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- with .Values.extraEnv }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.http_proxy }}
+ - name: HTTP_PROXY
+ value: {{ . }}
+ {{- end }}
+ {{- with .Values.https_proxy }}
+ - name: HTTPS_PROXY
+ value: {{ . }}
+ {{- end }}
+ {{- with .Values.no_proxy }}
+ - name: NO_PROXY
+ value: {{ . }}
+ {{- end }}
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+
+ {{- with .Values.livenessProbe }}
+ {{- if .enabled }}
+ # LivenessProbe settings are based on those used for the Kubernetes
+ # controller-manager. See:
+ # https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245
+ livenessProbe:
+ httpGet:
+ port: http-healthz
+ path: /livez
+ scheme: HTTP
+ initialDelaySeconds: {{ .initialDelaySeconds }}
+ periodSeconds: {{ .periodSeconds }}
+ timeoutSeconds: {{ .timeoutSeconds }}
+ successThreshold: {{ .successThreshold }}
+ failureThreshold: {{ .failureThreshold }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.podDnsPolicy }}
+ dnsPolicy: {{ . }}
+ {{- end }}
+ {{- with .Values.podDnsConfig }}
+ dnsConfig:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/charts/cert-manager/templates/networkpolicy-egress.yaml b/charts/cert-manager/templates/networkpolicy-egress.yaml
new file mode 100644
index 0000000..0971200
--- /dev/null
+++ b/charts/cert-manager/templates/networkpolicy-egress.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.webhook.networkPolicy.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ template "webhook.fullname" . }}-allow-egress
+ namespace: {{ include "cert-manager.namespace" . }}
+spec:
+ egress:
+ {{- with .Values.webhook.networkPolicy.egress }}
+ {{- toYaml . | nindent 2 }}
+ {{- end }}
+ podSelector:
+ matchLabels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- with .Values.webhook.podLabels }}
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ policyTypes:
+ - Egress
+{{- end }}
diff --git a/charts/cert-manager/templates/networkpolicy-webhooks.yaml b/charts/cert-manager/templates/networkpolicy-webhooks.yaml
new file mode 100644
index 0000000..349877a
--- /dev/null
+++ b/charts/cert-manager/templates/networkpolicy-webhooks.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.webhook.networkPolicy.enabled }}
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ template "webhook.fullname" . }}-allow-ingress
+ namespace: {{ include "cert-manager.namespace" . }}
+spec:
+ ingress:
+ {{- with .Values.webhook.networkPolicy.ingress }}
+ {{- toYaml . | nindent 2 }}
+ {{- end }}
+ podSelector:
+ matchLabels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- with .Values.webhook.podLabels }}
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ policyTypes:
+ - Ingress
+
+{{- end }}
diff --git a/charts/cert-manager/templates/poddisruptionbudget.yaml b/charts/cert-manager/templates/poddisruptionbudget.yaml
new file mode 100644
index 0000000..dab75ce
--- /dev/null
+++ b/charts/cert-manager/templates/poddisruptionbudget.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.podDisruptionBudget.enabled }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "cert-manager.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+
+ {{- with .Values.podDisruptionBudget.minAvailable }}
+ minAvailable: {{ . }}
+ {{- end }}
+ {{- with .Values.podDisruptionBudget.maxUnavailable }}
+ maxUnavailable: {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/psp-clusterrole.yaml b/charts/cert-manager/templates/psp-clusterrole.yaml
new file mode 100644
index 0000000..1d40a02
--- /dev/null
+++ b/charts/cert-manager/templates/psp-clusterrole.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.global.podSecurityPolicy.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-psp
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+- apiGroups: ['policy']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "cert-manager.fullname" . }}
+{{- end }}
diff --git a/charts/cert-manager/templates/psp-clusterrolebinding.yaml b/charts/cert-manager/templates/psp-clusterrolebinding.yaml
new file mode 100644
index 0000000..4f09b6b
--- /dev/null
+++ b/charts/cert-manager/templates/psp-clusterrolebinding.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-psp
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-psp
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
diff --git a/charts/cert-manager/templates/psp.yaml b/charts/cert-manager/templates/psp.yaml
new file mode 100644
index 0000000..9e99f5c
--- /dev/null
+++ b/charts/cert-manager/templates/psp.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "cert-manager.fullname" . }}
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ {{- if .Values.global.podSecurityPolicy.useAppArmor }}
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ {{- end }}
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ allowedCapabilities: [] # default set of capabilities are implicitly allowed
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+{{- end }}
diff --git a/charts/cert-manager/templates/rbac.yaml b/charts/cert-manager/templates/rbac.yaml
new file mode 100644
index 0000000..830e372
--- /dev/null
+++ b/charts/cert-manager/templates/rbac.yaml
@@ -0,0 +1,544 @@
+{{- if .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "cert-manager.fullname" . }}:leaderelection
+ namespace: {{ .Values.global.leaderElection.namespace }}
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ resourceNames: ["cert-manager-controller"]
+ verbs: ["get", "update", "patch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["create"]
+
+---
+
+# grant cert-manager permission to manage the leader election leases in the
+# leader election namespace
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "cert-manager.fullname" . }}:leaderelection
+ namespace: {{ .Values.global.leaderElection.namespace }}
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "cert-manager.fullname" . }}:leaderelection
+subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+
+---
+
+# Issuer controller role
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-issuers
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["issuers", "issuers/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["cert-manager.io"]
+ resources: ["issuers"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch", "create", "update", "delete"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+---
+
+# ClusterIssuer controller role
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["clusterissuers", "clusterissuers/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["cert-manager.io"]
+ resources: ["clusterissuers"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch", "create", "update", "delete"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+
+---
+
+# Certificates controller role
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-certificates
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"]
+ verbs: ["get", "list", "watch"]
+ # We require these rules to support users with the OwnerReferencesPermissionEnforcement
+ # admission controller enabled:
+ # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates/finalizers", "certificaterequests/finalizers"]
+ verbs: ["update"]
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["orders"]
+ verbs: ["create", "delete", "get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+
+---
+
+# Orders controller role
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-orders
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["orders", "orders/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["orders", "challenges"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["cert-manager.io"]
+ resources: ["clusterissuers", "issuers"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["challenges"]
+ verbs: ["create", "delete"]
+ # We require these rules to support users with the OwnerReferencesPermissionEnforcement
+ # admission controller enabled:
+ # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["orders/finalizers"]
+ verbs: ["update"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+
+---
+
+# Challenges controller role
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-challenges
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ # Use to update challenge resource status
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["challenges", "challenges/status"]
+ verbs: ["update", "patch"]
+ # Used to watch challenge resources
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["challenges"]
+ verbs: ["get", "list", "watch"]
+ # Used to watch challenges, issuer and clusterissuer resources
+ - apiGroups: ["cert-manager.io"]
+ resources: ["issuers", "clusterissuers"]
+ verbs: ["get", "list", "watch"]
+ # Need to be able to retrieve ACME account private key to complete challenges
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch"]
+ # Used to create events
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+ # HTTP01 rules
+ - apiGroups: [""]
+ resources: ["pods", "services"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses"]
+ verbs: ["get", "list", "watch", "create", "delete", "update"]
+ - apiGroups: [ "gateway.networking.k8s.io" ]
+ resources: [ "httproutes" ]
+ verbs: ["get", "list", "watch", "create", "delete", "update"]
+ # We require the ability to specify a custom hostname when we are creating
+ # new ingress resources.
+ # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148
+ - apiGroups: ["route.openshift.io"]
+ resources: ["routes/custom-host"]
+ verbs: ["create"]
+ # We require these rules to support users with the OwnerReferencesPermissionEnforcement
+ # admission controller enabled:
+ # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["challenges/finalizers"]
+ verbs: ["update"]
+ # DNS01 rules (duplicated above)
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch"]
+
+---
+
+# ingress-shim controller role
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates", "certificaterequests"]
+ verbs: ["create", "update", "delete"]
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses"]
+ verbs: ["get", "list", "watch"]
+ # We require these rules to support users with the OwnerReferencesPermissionEnforcement
+ # admission controller enabled:
+ # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses/finalizers"]
+ verbs: ["update"]
+ - apiGroups: ["gateway.networking.k8s.io"]
+ resources: ["gateways", "httproutes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["gateway.networking.k8s.io"]
+ resources: ["gateways/finalizers", "httproutes/finalizers"]
+ verbs: ["update"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-issuers
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-issuers
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-certificates
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-certificates
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-orders
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-orders
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-challenges
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-challenges
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-view
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+ {{- if .Values.global.rbac.aggregateClusterRoles }}
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ {{- end }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates", "certificaterequests", "issuers"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["challenges", "orders"]
+ verbs: ["get", "list", "watch"]
+
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-edit
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+ {{- if .Values.global.rbac.aggregateClusterRoles }}
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ {{- end }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates", "certificaterequests", "issuers"]
+ verbs: ["create", "delete", "deletecollection", "patch", "update"]
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates/status"]
+ verbs: ["update"]
+ - apiGroups: ["acme.cert-manager.io"]
+ resources: ["challenges", "orders"]
+ verbs: ["create", "delete", "deletecollection", "patch", "update"]
+
+---
+
+# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cert-manager"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["signers"]
+ verbs: ["approve"]
+ resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cert-manager"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+
+---
+
+# Permission to:
+# - Update and sign CertificateSigningRequests referencing cert-manager.io Issuers and ClusterIssuers
+# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cert-manager"
+ {{- include "labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["certificates.k8s.io"]
+ resources: ["certificatesigningrequests"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["certificates.k8s.io"]
+ resources: ["certificatesigningrequests/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["certificates.k8s.io"]
+ resources: ["signers"]
+ resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
+ verbs: ["sign"]
+ - apiGroups: ["authorization.k8s.io"]
+ resources: ["subjectaccessreviews"]
+ verbs: ["create"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "cert-manager"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
+subjects:
+ - name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ kind: ServiceAccount
+{{- end }}
diff --git a/charts/cert-manager/templates/service.yaml b/charts/cert-manager/templates/service.yaml
new file mode 100644
index 0000000..ec34d58
--- /dev/null
+++ b/charts/cert-manager/templates/service.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.prometheus.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "cert-manager.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- with .Values.serviceAnnotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.serviceLabels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ ports:
+ - protocol: TCP
+ port: 9402
+ name: tcp-prometheus-servicemonitor
+ targetPort: {{ .Values.prometheus.servicemonitor.targetPort }}
+ selector:
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+{{- end }}
diff --git a/charts/cert-manager/templates/serviceaccount.yaml b/charts/cert-manager/templates/serviceaccount.yaml
new file mode 100644
index 0000000..6026842
--- /dev/null
+++ b/charts/cert-manager/templates/serviceaccount.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+{{- with .Values.global.imagePullSecrets }}
+imagePullSecrets:
+ {{- toYaml . | nindent 2 }}
+{{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "cert-manager.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.labels }}
+ {{ toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/servicemonitor.yaml b/charts/cert-manager/templates/servicemonitor.yaml
new file mode 100644
index 0000000..9d9e899
--- /dev/null
+++ b/charts/cert-manager/templates/servicemonitor.yaml
@@ -0,0 +1,45 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.servicemonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ template "cert-manager.fullname" . }}
+{{- if .Values.prometheus.servicemonitor.namespace }}
+ namespace: {{ .Values.prometheus.servicemonitor.namespace }}
+{{- else }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
+ labels:
+ app: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/name: {{ include "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+ {{- include "labels" . | nindent 4 }}
+ prometheus: {{ .Values.prometheus.servicemonitor.prometheusInstance }}
+ {{- with .Values.prometheus.servicemonitor.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- if .Values.prometheus.servicemonitor.annotations }}
+ annotations:
+ {{- with .Values.prometheus.servicemonitor.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
+spec:
+ jobLabel: {{ template "cert-manager.fullname" . }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ template "cert-manager.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "controller"
+{{- if .Values.prometheus.servicemonitor.namespace }}
+ namespaceSelector:
+ matchNames:
+ - {{ include "cert-manager.namespace" . }}
+{{- end }}
+ endpoints:
+ - targetPort: {{ .Values.prometheus.servicemonitor.targetPort }}
+ path: {{ .Values.prometheus.servicemonitor.path }}
+ interval: {{ .Values.prometheus.servicemonitor.interval }}
+ scrapeTimeout: {{ .Values.prometheus.servicemonitor.scrapeTimeout }}
+ honorLabels: {{ .Values.prometheus.servicemonitor.honorLabels }}
+{{- end }}
diff --git a/charts/cert-manager/templates/startupapicheck-job.yaml b/charts/cert-manager/templates/startupapicheck-job.yaml
new file mode 100644
index 0000000..a9b965e
--- /dev/null
+++ b/charts/cert-manager/templates/startupapicheck-job.yaml
@@ -0,0 +1,88 @@
+{{- if .Values.startupapicheck.enabled }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "startupapicheck.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.startupapicheck.jobAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ backoffLimit: {{ .Values.startupapicheck.backoffLimit }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 8 }}
+ {{- with .Values.startupapicheck.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ restartPolicy: OnFailure
+ serviceAccountName: {{ template "startupapicheck.serviceAccountName" . }}
+ {{- if hasKey .Values.startupapicheck "automountServiceAccountToken" }}
+ automountServiceAccountToken: {{ .Values.startupapicheck.automountServiceAccountToken }}
+ {{- end }}
+ {{- with .Values.global.priorityClassName }}
+ priorityClassName: {{ . | quote }}
+ {{- end }}
+ {{- with .Values.startupapicheck.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}-startupapicheck
+ {{- with .Values.startupapicheck.image }}
+ image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.startupapicheck.image.pullPolicy }}
+ args:
+ - check
+ - api
+ - --wait={{ .Values.startupapicheck.timeout }}
+ {{- with .Values.startupapicheck.extraArgs }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.containerSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.volumeMounts }}
+ volumeMounts:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.startupapicheck.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/startupapicheck-psp-clusterrole.yaml b/charts/cert-manager/templates/startupapicheck-psp-clusterrole.yaml
new file mode 100644
index 0000000..dacd4be
--- /dev/null
+++ b/charts/cert-manager/templates/startupapicheck-psp-clusterrole.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.startupapicheck.enabled }}
+{{- if .Values.global.podSecurityPolicy.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "startupapicheck.fullname" . }}-psp
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.startupapicheck.rbac.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+rules:
+- apiGroups: ['policy']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "startupapicheck.fullname" . }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/startupapicheck-psp-clusterrolebinding.yaml b/charts/cert-manager/templates/startupapicheck-psp-clusterrolebinding.yaml
new file mode 100644
index 0000000..54d5a42
--- /dev/null
+++ b/charts/cert-manager/templates/startupapicheck-psp-clusterrolebinding.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.startupapicheck.enabled }}
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "startupapicheck.fullname" . }}-psp
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.startupapicheck.rbac.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "startupapicheck.fullname" . }}-psp
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "startupapicheck.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/startupapicheck-psp.yaml b/charts/cert-manager/templates/startupapicheck-psp.yaml
new file mode 100644
index 0000000..f09d60d
--- /dev/null
+++ b/charts/cert-manager/templates/startupapicheck-psp.yaml
@@ -0,0 +1,51 @@
+{{- if .Values.startupapicheck.enabled }}
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "startupapicheck.fullname" . }}
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 4 }}
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ {{- if .Values.global.podSecurityPolicy.useAppArmor }}
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ {{- end }}
+ {{- with .Values.startupapicheck.rbac.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ allowedCapabilities: [] # default set of capabilities are implicitly allowed
+ volumes:
+ - 'projected'
+ - 'secret'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/startupapicheck-rbac.yaml b/charts/cert-manager/templates/startupapicheck-rbac.yaml
new file mode 100644
index 0000000..606e725
--- /dev/null
+++ b/charts/cert-manager/templates/startupapicheck-rbac.yaml
@@ -0,0 +1,48 @@
+{{- if .Values.startupapicheck.enabled }}
+{{- if .Values.global.rbac.create }}
+# create certificate role
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "startupapicheck.fullname" . }}:create-cert
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.startupapicheck.rbac.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+rules:
+ - apiGroups: ["cert-manager.io"]
+ resources: ["certificates"]
+ verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "startupapicheck.fullname" . }}:create-cert
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.startupapicheck.rbac.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "startupapicheck.fullname" . }}:create-cert
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "startupapicheck.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/startupapicheck-serviceaccount.yaml b/charts/cert-manager/templates/startupapicheck-serviceaccount.yaml
new file mode 100644
index 0000000..8c41760
--- /dev/null
+++ b/charts/cert-manager/templates/startupapicheck-serviceaccount.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.startupapicheck.enabled }}
+{{- if .Values.startupapicheck.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: {{ .Values.startupapicheck.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "startupapicheck.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ {{- with .Values.startupapicheck.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ labels:
+ app: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "startupapicheck"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.startupapicheck.serviceAccount.labels }}
+ {{ toYaml . | nindent 4 }}
+ {{- end }}
+{{- with .Values.global.imagePullSecrets }}
+imagePullSecrets:
+ {{- toYaml . | nindent 2 }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/webhook-config.yaml b/charts/cert-manager/templates/webhook-config.yaml
new file mode 100644
index 0000000..f3f72f0
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-config.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.webhook.config -}}
+ {{- if not .Values.webhook.config.apiVersion -}}
+ {{- fail "webhook.config.apiVersion must be set" -}}
+ {{- end -}}
+
+ {{- if not .Values.webhook.config.kind -}}
+ {{- fail "webhook.config.kind must be set" -}}
+ {{- end -}}
+{{- end -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "webhook.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+data:
+ {{- if .Values.webhook.config }}
+ config.yaml: |
+ {{ .Values.webhook.config | toYaml | nindent 4 }}
+ {{- end }}
diff --git a/charts/cert-manager/templates/webhook-deployment.yaml b/charts/cert-manager/templates/webhook-deployment.yaml
new file mode 100644
index 0000000..043c4b1
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-deployment.yaml
@@ -0,0 +1,185 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "webhook.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.webhook.deploymentAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.webhook.replicaCount }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- with .Values.webhook.strategy }}
+ strategy:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 8 }}
+ {{- with .Values.webhook.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.webhook.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ serviceAccountName: {{ template "webhook.serviceAccountName" . }}
+ {{- if hasKey .Values.webhook "automountServiceAccountToken" }}
+ automountServiceAccountToken: {{ .Values.webhook.automountServiceAccountToken }}
+ {{- end }}
+ {{- with .Values.global.priorityClassName }}
+ priorityClassName: {{ . | quote }}
+ {{- end }}
+ {{- with .Values.webhook.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.webhook.hostNetwork }}
+ hostNetwork: true
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}-webhook
+ {{- with .Values.webhook.image }}
+ image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.webhook.image.pullPolicy }}
+ args:
+ {{- if .Values.global.logLevel }}
+ - --v={{ .Values.global.logLevel }}
+ {{- end }}
+ {{- if .Values.webhook.config }}
+ - --config=/var/cert-manager/config/config.yaml
+ {{- end }}
+ {{- $config := default .Values.webhook.config "" }}
+ {{ if not $config.securePort -}}
+ - --secure-port={{ .Values.webhook.securePort }}
+ {{- end }}
+ {{- if .Values.featureGates }}
+ - --feature-gates={{ .Values.featureGates }}
+ {{- end }}
+ {{- $tlsConfig := default $config.tlsConfig "" }}
+ {{ if or (not $config.tlsConfig) (and (not $tlsConfig.dynamic) (not $tlsConfig.filesystem) ) -}}
+ - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE)
+ - --dynamic-serving-ca-secret-name={{ template "webhook.fullname" . }}-ca
+ - --dynamic-serving-dns-names={{ template "webhook.fullname" . }}
+ - --dynamic-serving-dns-names={{ template "webhook.fullname" . }}.$(POD_NAMESPACE)
+ - --dynamic-serving-dns-names={{ template "webhook.fullname" . }}.$(POD_NAMESPACE).svc
+ {{ if .Values.webhook.url.host }}
+ - --dynamic-serving-dns-names={{ .Values.webhook.url.host }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.webhook.extraArgs }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ ports:
+ - name: https
+ protocol: TCP
+ {{- if $config.securePort }}
+ containerPort: {{ $config.securePort }}
+ {{- else if .Values.webhook.securePort }}
+ containerPort: {{ .Values.webhook.securePort }}
+ {{- else }}
+ containerPort: 6443
+ {{- end }}
+ - name: healthcheck
+ protocol: TCP
+ {{- if $config.healthzPort }}
+ containerPort: {{ $config.healthzPort }}
+ {{- else }}
+ containerPort: 6080
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /livez
+ {{- if $config.healthzPort }}
+ port: {{ $config.healthzPort }}
+ {{- else }}
+ port: 6080
+ {{- end }}
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.webhook.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.webhook.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.webhook.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.webhook.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.webhook.livenessProbe.failureThreshold }}
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ {{- if $config.healthzPort }}
+ port: {{ $config.healthzPort }}
+ {{- else }}
+ port: 6080
+ {{- end }}
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.webhook.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.webhook.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.webhook.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.webhook.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.webhook.readinessProbe.failureThreshold }}
+ {{- with .Values.webhook.containerSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- with .Values.webhook.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.webhook.config .Values.webhook.volumeMounts }}
+ volumeMounts:
+ {{- if .Values.webhook.config }}
+ - name: config
+ mountPath: /var/cert-manager/config
+ {{- end }}
+ {{- if .Values.webhook.volumeMounts }}
+ {{- toYaml .Values.webhook.volumeMounts | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.webhook.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.webhook.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.webhook.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.webhook.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if or .Values.webhook.config .Values.webhook.volumes }}
+ volumes:
+ {{- if .Values.webhook.config }}
+ - name: config
+ configMap:
+ name: {{ include "webhook.fullname" . }}
+ {{- end }}
+ {{- if .Values.webhook.volumes }}
+ {{- toYaml .Values.webhook.volumes | nindent 8 }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/cert-manager/templates/webhook-mutating-webhook.yaml b/charts/cert-manager/templates/webhook-mutating-webhook.yaml
new file mode 100644
index 0000000..f3db011
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-mutating-webhook.yaml
@@ -0,0 +1,46 @@
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: {{ include "webhook.fullname" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+ annotations:
+ cert-manager.io/inject-ca-from-secret: {{ printf "%s/%s-ca" (include "cert-manager.namespace" .) (include "webhook.fullname" .) | quote }}
+ {{- with .Values.webhook.mutatingWebhookConfigurationAnnotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+webhooks:
+ - name: webhook.cert-manager.io
+ rules:
+ - apiGroups:
+ - "cert-manager.io"
+ - "acme.cert-manager.io"
+ apiVersions:
+ - "v1"
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - "*/*"
+ admissionReviewVersions: ["v1"]
+ # This webhook only accepts v1 cert-manager resources.
+ # Equivalent matchPolicy ensures that non-v1 resource requests are sent to
+ # this webhook (after the resources have been converted to v1).
+ matchPolicy: Equivalent
+ timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
+ failurePolicy: Fail
+ # Only include 'sideEffects' field in Kubernetes 1.12+
+ sideEffects: None
+ clientConfig:
+ {{- if .Values.webhook.url.host }}
+ url: https://{{ .Values.webhook.url.host }}/mutate
+ {{- else }}
+ service:
+ name: {{ template "webhook.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ path: /mutate
+ {{- end }}
diff --git a/charts/cert-manager/templates/webhook-poddisruptionbudget.yaml b/charts/cert-manager/templates/webhook-poddisruptionbudget.yaml
new file mode 100644
index 0000000..c8a357c
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-poddisruptionbudget.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.webhook.podDisruptionBudget.enabled }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "webhook.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+
+ {{- with .Values.webhook.podDisruptionBudget.minAvailable }}
+ minAvailable: {{ . }}
+ {{- end }}
+ {{- with .Values.webhook.podDisruptionBudget.maxUnavailable }}
+ maxUnavailable: {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/webhook-psp-clusterrole.yaml b/charts/cert-manager/templates/webhook-psp-clusterrole.yaml
new file mode 100644
index 0000000..f6fa4c5
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-psp-clusterrole.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.global.podSecurityPolicy.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "webhook.fullname" . }}-psp
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+rules:
+- apiGroups: ['policy']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "webhook.fullname" . }}
+{{- end }}
diff --git a/charts/cert-manager/templates/webhook-psp-clusterrolebinding.yaml b/charts/cert-manager/templates/webhook-psp-clusterrolebinding.yaml
new file mode 100644
index 0000000..858df8f
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-psp-clusterrolebinding.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "webhook.fullname" . }}-psp
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "webhook.fullname" . }}-psp
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "webhook.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
diff --git a/charts/cert-manager/templates/webhook-psp.yaml b/charts/cert-manager/templates/webhook-psp.yaml
new file mode 100644
index 0000000..4d5d959
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-psp.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.global.podSecurityPolicy.enabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "webhook.fullname" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ {{- if .Values.global.podSecurityPolicy.useAppArmor }}
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ {{- end }}
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ allowedCapabilities: [] # default set of capabilities are implicitly allowed
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ hostNetwork: {{ .Values.webhook.hostNetwork }}
+ {{- if .Values.webhook.hostNetwork }}
+ hostPorts:
+ - max: {{ .Values.webhook.securePort }}
+ min: {{ .Values.webhook.securePort }}
+ {{- end }}
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1000
+ max: 1000
+{{- end }}
diff --git a/charts/cert-manager/templates/webhook-rbac.yaml b/charts/cert-manager/templates/webhook-rbac.yaml
new file mode 100644
index 0000000..b075ffd
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-rbac.yaml
@@ -0,0 +1,83 @@
+{{- if .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "webhook.fullname" . }}:dynamic-serving
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames:
+ - '{{ template "webhook.fullname" . }}-ca'
+ verbs: ["get", "list", "watch", "update"]
+# It's not possible to grant CREATE permission on a single resourceName.
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ template "webhook.fullname" . }}:dynamic-serving
+ namespace: {{ include "cert-manager.namespace" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "webhook.fullname" . }}:dynamic-serving
+subjects:
+- apiGroup: ""
+ kind: ServiceAccount
+ name: {{ template "webhook.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "webhook.fullname" . }}:subjectaccessreviews
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+rules:
+- apiGroups: ["authorization.k8s.io"]
+ resources: ["subjectaccessreviews"]
+ verbs: ["create"]
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "webhook.fullname" . }}:subjectaccessreviews
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "webhook.fullname" . }}:subjectaccessreviews
+subjects:
+- apiGroup: ""
+ kind: ServiceAccount
+ name: {{ template "webhook.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- end }}
diff --git a/charts/cert-manager/templates/webhook-service.yaml b/charts/cert-manager/templates/webhook-service.yaml
new file mode 100644
index 0000000..5f93950
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-service.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "webhook.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+{{- with .Values.webhook.serviceAnnotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.webhook.serviceLabels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.webhook.serviceType }}
+ {{- with .Values.webhook.loadBalancerIP }}
+ loadBalancerIP: {{ . }}
+ {{- end }}
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: "https"
+ selector:
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
diff --git a/charts/cert-manager/templates/webhook-serviceaccount.yaml b/charts/cert-manager/templates/webhook-serviceaccount.yaml
new file mode 100644
index 0000000..dff5c06
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-serviceaccount.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.webhook.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: {{ .Values.webhook.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "webhook.serviceAccountName" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ {{- with .Values.webhook.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+ {{- with .Values.webhook.serviceAccount.labels }}
+ {{ toYaml . | nindent 4 }}
+ {{- end }}
+{{- with .Values.global.imagePullSecrets }}
+imagePullSecrets:
+ {{- toYaml . | nindent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/cert-manager/templates/webhook-validating-webhook.yaml b/charts/cert-manager/templates/webhook-validating-webhook.yaml
new file mode 100644
index 0000000..a5d168e
--- /dev/null
+++ b/charts/cert-manager/templates/webhook-validating-webhook.yaml
@@ -0,0 +1,55 @@
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: {{ include "webhook.fullname" . }}
+ labels:
+ app: {{ include "webhook.name" . }}
+ app.kubernetes.io/name: {{ include "webhook.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: "webhook"
+ {{- include "labels" . | nindent 4 }}
+ annotations:
+ cert-manager.io/inject-ca-from-secret: {{ printf "%s/%s-ca" (include "cert-manager.namespace" .) (include "webhook.fullname" .) | quote}}
+ {{- with .Values.webhook.validatingWebhookConfigurationAnnotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+webhooks:
+ - name: webhook.cert-manager.io
+ namespaceSelector:
+ matchExpressions:
+ - key: "cert-manager.io/disable-validation"
+ operator: "NotIn"
+ values:
+ - "true"
+ - key: "name"
+ operator: "NotIn"
+ values:
+ - {{ include "cert-manager.namespace" . }}
+ rules:
+ - apiGroups:
+ - "cert-manager.io"
+ - "acme.cert-manager.io"
+ apiVersions:
+ - "v1"
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - "*/*"
+ admissionReviewVersions: ["v1"]
+ # This webhook only accepts v1 cert-manager resources.
+ # Equivalent matchPolicy ensures that non-v1 resource requests are sent to
+ # this webhook (after the resources have been converted to v1).
+ matchPolicy: Equivalent
+ timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
+ failurePolicy: Fail
+ sideEffects: None
+ clientConfig:
+ {{- if .Values.webhook.url.host }}
+ url: https://{{ .Values.webhook.url.host }}/validate
+ {{- else }}
+ service:
+ name: {{ template "webhook.fullname" . }}
+ namespace: {{ include "cert-manager.namespace" . }}
+ path: /validate
+ {{- end }}
diff --git a/charts/cert-manager/values.yaml b/charts/cert-manager/values.yaml
new file mode 100644
index 0000000..66df39a
--- /dev/null
+++ b/charts/cert-manager/values.yaml
@@ -0,0 +1,692 @@
+# Default values for cert-manager.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+global:
+ # Reference to one or more secrets to be used when pulling images
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ imagePullSecrets: []
+ # - name: "image-pull-secret"
+
+ # Labels to apply to all resources
+ # Please note that this does not add labels to the resources created dynamically by the controllers.
+ # For these resources, you have to add the labels in the template in the cert-manager custom resource:
+ # eg. podTemplate/ ingressTemplate in ACMEChallengeSolverHTTP01Ingress
+ # ref: https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress
+ # eg. secretTemplate in CertificateSpec
+ # ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
+ commonLabels: {}
+ # team_name: dev
+
+ # Optional priority class to be used for the cert-manager pods
+ priorityClassName: ""
+ rbac:
+ create: true
+ # Aggregate ClusterRoles to Kubernetes default user-facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
+ aggregateClusterRoles: true
+
+ podSecurityPolicy:
+ enabled: false
+ useAppArmor: true
+
+ # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
+ logLevel: 2
+
+ leaderElection:
+ # Override the namespace used for the leader election lease
+ namespace: "kube-system"
+
+ # The duration that non-leader candidates will wait after observing a
+ # leadership renewal until attempting to acquire leadership of a led but
+ # unrenewed leader slot. This is effectively the maximum duration that a
+ # leader can be stopped before it is replaced by another candidate.
+ # leaseDuration: 60s
+
+ # The interval between attempts by the acting master to renew a leadership
+ # slot before it stops leading. This must be less than or equal to the
+ # lease duration.
+ # renewDeadline: 40s
+
+ # The duration the clients should wait between attempting acquisition and
+ # renewal of a leadership.
+ # retryPeriod: 15s
+
+installCRDs: false
+
+replicaCount: 1
+
+strategy: {}
+ # type: RollingUpdate
+ # rollingUpdate:
+ # maxSurge: 0
+ # maxUnavailable: 1
+
+podDisruptionBudget:
+ enabled: false
+
+ minAvailable: 1
+ # maxUnavailable: 1
+
+ # minAvailable and maxUnavailable can either be set to an integer (e.g. 1)
+ # or a percentage value (e.g. 25%)
+
+# Comma separated list of feature gates that should be enabled on the controller
+# Note: do not use this field to pass feature gate values into webhook
+# component as this behaviour relies on a bug that will be fixed in cert-manager 1.13
+# https://github.com/cert-manager/cert-manager/pull/6093
+# Use webhook.extraArgs to pass --feature-gates flag directly instead.
+featureGates: ""
+
+# The maximum number of challenges that can be scheduled as 'processing' at once
+maxConcurrentChallenges: 60
+
+image:
+ repository: quay.io/jetstack/cert-manager-controller
+ # You can manage a registry with
+ # registry: quay.io
+ # repository: jetstack/cert-manager-controller
+
+ # Override the image tag to deploy by setting this variable.
+ # If no value is set, the chart's appVersion will be used.
+ # tag: canary
+
+ # Setting a digest will override any tag
+ # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
+ pullPolicy: IfNotPresent
+
+# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
+# resources. By default, the same namespace as cert-manager is deployed within is
+# used. This namespace will not be automatically created by the Helm chart.
+clusterResourceNamespace: ""
+
+# This namespace allows you to define where the services will be installed into
+# if not set then they will use the namespace of the release
+# This is helpful when installing cert manager as a chart dependency (sub chart)
+namespace: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name: ""
+ # Optional additional annotations to add to the controller's ServiceAccount
+ # annotations: {}
+ # Automount API credentials for a Service Account.
+ # Optional additional labels to add to the controller's ServiceAccount
+ # labels: {}
+ automountServiceAccountToken: true
+
+# Automounting API credentials for a particular pod
+# automountServiceAccountToken: true
+
+# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
+enableCertificateOwnerRef: false
+
+# Setting Nameservers for DNS01 Self Check
+# See: https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check
+
+# Comma separated string with host and port of the recursive nameservers cert-manager should query
+dns01RecursiveNameservers: ""
+
+# Forces cert-manager to only use the recursive nameservers for verification.
+# Enabling this option could cause the DNS01 self check to take longer due to caching performed by the recursive nameservers
+dns01RecursiveNameserversOnly: false
+
+# Additional command line flags to pass to cert-manager controller binary.
+# To see all available flags run docker run quay.io/jetstack/cert-manager-controller:<version> --help
+extraArgs: []
+ # Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver
+ # - --controllers=*,-certificaterequests-approver
+
+extraEnv: []
+# - name: SOME_VAR
+# value: 'some value'
+
+resources: {}
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+# Pod Security Context
+# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+
+# Container Security Context to be set on the controller component container
+# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+containerSecurityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+
+
+volumes: []
+
+volumeMounts: []
+
+# Optional additional annotations to add to the controller Deployment
+# deploymentAnnotations: {}
+
+# Optional additional annotations to add to the controller Pods
+# podAnnotations: {}
+
+podLabels: {}
+
+# Optional annotations to add to the controller Service
+# serviceAnnotations: {}
+
+# Optional additional labels to add to the controller Service
+# serviceLabels: {}
+
+# Optional DNS settings, useful if you have a public and private DNS zone for
+# the same domain on Route 53. What follows is an example of ensuring
+# cert-manager can access an ingress or DNS TXT records at all times.
+# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
+# the cluster to work.
+# podDnsPolicy: "None"
+# podDnsConfig:
+# nameservers:
+# - "1.1.1.1"
+# - "8.8.8.8"
+
+nodeSelector:
+ kubernetes.io/os: linux
+
+ingressShim: {}
+ # defaultIssuerName: ""
+ # defaultIssuerKind: ""
+ # defaultIssuerGroup: ""
+
+prometheus:
+ enabled: true
+ servicemonitor:
+ enabled: false
+ prometheusInstance: default
+ targetPort: 9402
+ path: /metrics
+ interval: 60s
+ scrapeTimeout: 30s
+ labels: {}
+ annotations: {}
+ honorLabels: false
+
+# Use these variables to configure the HTTP_PROXY environment variables
+# http_proxy: "http://proxy:8080"
+# https_proxy: "https://proxy:8080"
+# no_proxy: 127.0.0.1,localhost
+
+# A Kubernetes Affinity, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core
+# for example:
+# affinity:
+# nodeAffinity:
+# requiredDuringSchedulingIgnoredDuringExecution:
+# nodeSelectorTerms:
+# - matchExpressions:
+# - key: foo.bar.com/role
+# operator: In
+# values:
+# - master
+affinity: {}
+
+# A list of Kubernetes Tolerations, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core
+# for example:
+# tolerations:
+# - key: foo.bar.com/role
+# operator: Equal
+# value: master
+# effect: NoSchedule
+tolerations: []
+
+# A list of Kubernetes TopologySpreadConstraints, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core
+# for example:
+# topologySpreadConstraints:
+# - maxSkew: 2
+# topologyKey: topology.kubernetes.io/zone
+# whenUnsatisfiable: ScheduleAnyway
+# labelSelector:
+# matchLabels:
+# app.kubernetes.io/instance: cert-manager
+# app.kubernetes.io/component: controller
+topologySpreadConstraints: []
+
+# LivenessProbe settings for the controller container of the controller Pod.
+#
+# Disabled by default, because the controller has a leader election mechanism
+# which should cause it to exit if it is unable to renew its leader election
+# record.
+# LivenessProbe durations and thresholds are based on those used for the Kubernetes
+# controller-manager. See:
+# https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245
+livenessProbe:
+ enabled: false
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 15
+ successThreshold: 1
+ failureThreshold: 8
+
+webhook:
+ replicaCount: 1
+ timeoutSeconds: 10
+
+ # Used to configure options for the webhook pod.
+ # This allows setting options that'd usually be provided via flags.
+ # An APIVersion and Kind must be specified in your values.yaml file.
+ # Flags will override options that are set here.
+ config:
+ # apiVersion: webhook.config.cert-manager.io/v1alpha1
+ # kind: WebhookConfiguration
+
+ # The port that the webhook should listen on for requests.
+ # In GKE private clusters, by default kubernetes apiservers are allowed to
+ # talk to the cluster nodes only on 443 and 10250, so configuring
+ # securePort: 10250, will work out of the box without needing to add firewall
+ # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000.
+ # This should be uncommented and set as a default by the chart once we graduate
+ # the apiVersion of WebhookConfiguration past v1alpha1.
+ # securePort: 10250
+
+ strategy: {}
+ # type: RollingUpdate
+ # rollingUpdate:
+ # maxSurge: 0
+ # maxUnavailable: 1
+
+ # Pod Security Context to be set on the webhook component Pod
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+
+ podDisruptionBudget:
+ enabled: false
+
+ minAvailable: 1
+ # maxUnavailable: 1
+
+ # minAvailable and maxUnavailable can either be set to an integer (e.g. 1)
+ # or a percentage value (e.g. 25%)
+
+ # Container Security Context to be set on the webhook component container
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+
+ # Optional additional annotations to add to the webhook Deployment
+ # deploymentAnnotations: {}
+
+ # Optional additional annotations to add to the webhook Pods
+ # podAnnotations: {}
+
+ # Optional additional annotations to add to the webhook Service
+ # serviceAnnotations: {}
+
+ # Optional additional annotations to add to the webhook MutatingWebhookConfiguration
+ # mutatingWebhookConfigurationAnnotations: {}
+
+ # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration
+ # validatingWebhookConfigurationAnnotations: {}
+
+ # Additional command line flags to pass to cert-manager webhook binary.
+ # To see all available flags run docker run quay.io/jetstack/cert-manager-webhook:<version> --help
+ extraArgs: []
+ # Path to a file containing a WebhookConfiguration object used to configure the webhook
+ # - --config=<path-to-config-file>
+
+ resources: {}
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+ ## Liveness and readiness probe values
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+ ##
+ livenessProbe:
+ failureThreshold: 3
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ failureThreshold: 3
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 1
+
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ affinity: {}
+
+ tolerations: []
+
+ topologySpreadConstraints: []
+
+ # Optional additional labels to add to the Webhook Pods
+ podLabels: {}
+
+ # Optional additional labels to add to the Webhook Service
+ serviceLabels: {}
+
+ image:
+ repository: quay.io/jetstack/cert-manager-webhook
+ # You can manage a registry with
+ # registry: quay.io
+ # repository: jetstack/cert-manager-webhook
+
+ # Override the image tag to deploy by setting this variable.
+ # If no value is set, the chart's appVersion will be used.
+ # tag: canary
+
+ # Setting a digest will override any tag
+ # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
+
+ pullPolicy: IfNotPresent
+
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name: ""
+ # Optional additional annotations to add to the controller's ServiceAccount
+ # annotations: {}
+ # Optional additional labels to add to the webhook's ServiceAccount
+ # labels: {}
+ # Automount API credentials for a Service Account.
+ automountServiceAccountToken: true
+
+ # Automounting API credentials for a particular pod
+ # automountServiceAccountToken: true
+
+ # The port that the webhook should listen on for requests.
+ # In GKE private clusters, by default kubernetes apiservers are allowed to
+ # talk to the cluster nodes only on 443 and 10250, so configuring
+ # securePort: 10250, will work out of the box without needing to add firewall
+ # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000
+ securePort: 10250
+
+ # Specifies if the webhook should be started in hostNetwork mode.
+ #
+ # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
+ # CNI (such as calico), because control-plane managed by AWS cannot communicate
+ # with pods' IP CIDR and admission webhooks are not working
+ #
+ # Since the default port for the webhook conflicts with kubelet on the host
+ # network, `webhook.securePort` should be changed to an available port if
+ # running in hostNetwork mode.
+ hostNetwork: false
+
+ # Specifies how the service should be handled. Useful if you want to expose the
+ # webhook to outside of the cluster. In some cases, the control plane cannot
+ # reach internal services.
+ serviceType: ClusterIP
+ # loadBalancerIP:
+
+ # Overrides the mutating webhook and validating webhook so they reach the webhook
+ # service using the `url` field instead of a service.
+ url: {}
+ # host:
+
+ # Enables default network policies for webhooks.
+ networkPolicy:
+ enabled: false
+ ingress:
+ - from:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ egress:
+ - ports:
+ - port: 80
+ protocol: TCP
+ - port: 443
+ protocol: TCP
+ - port: 53
+ protocol: TCP
+ - port: 53
+ protocol: UDP
+ # On OpenShift and OKD, the Kubernetes API server listens on
+ # port 6443.
+ - port: 6443
+ protocol: TCP
+ to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+
+ volumes: []
+ volumeMounts: []
+
+cainjector:
+ enabled: true
+ replicaCount: 1
+
+ strategy: {}
+ # type: RollingUpdate
+ # rollingUpdate:
+ # maxSurge: 0
+ # maxUnavailable: 1
+
+ # Pod Security Context to be set on the cainjector component Pod
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+
+ podDisruptionBudget:
+ enabled: false
+
+ minAvailable: 1
+ # maxUnavailable: 1
+
+ # minAvailable and maxUnavailable can either be set to an integer (e.g. 1)
+ # or a percentage value (e.g. 25%)
+
+ # Container Security Context to be set on the cainjector component container
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+
+
+ # Optional additional annotations to add to the cainjector Deployment
+ # deploymentAnnotations: {}
+
+ # Optional additional annotations to add to the cainjector Pods
+ # podAnnotations: {}
+
+ # Additional command line flags to pass to cert-manager cainjector binary.
+ # To see all available flags run docker run quay.io/jetstack/cert-manager-cainjector:<version> --help
+ extraArgs: []
+ # Enable profiling for cainjector
+ # - --enable-profiling=true
+
+ resources: {}
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ affinity: {}
+
+ tolerations: []
+
+ topologySpreadConstraints: []
+
+ # Optional additional labels to add to the CA Injector Pods
+ podLabels: {}
+
+ image:
+ repository: quay.io/jetstack/cert-manager-cainjector
+ # You can manage a registry with
+ # registry: quay.io
+ # repository: jetstack/cert-manager-cainjector
+
+ # Override the image tag to deploy by setting this variable.
+ # If no value is set, the chart's appVersion will be used.
+ # tag: canary
+
+ # Setting a digest will override any tag
+ # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
+
+ pullPolicy: IfNotPresent
+
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name: ""
+ # Optional additional annotations to add to the controller's ServiceAccount
+ # annotations: {}
+ # Automount API credentials for a Service Account.
+ # Optional additional labels to add to the cainjector's ServiceAccount
+ # labels: {}
+ automountServiceAccountToken: true
+
+ # Automounting API credentials for a particular pod
+ # automountServiceAccountToken: true
+
+ volumes: []
+ volumeMounts: []
+
+acmesolver:
+ image:
+ repository: quay.io/jetstack/cert-manager-acmesolver
+ # You can manage a registry with
+ # registry: quay.io
+ # repository: jetstack/cert-manager-acmesolver
+
+ # Override the image tag to deploy by setting this variable.
+ # If no value is set, the chart's appVersion will be used.
+ # tag: canary
+
+ # Setting a digest will override any tag
+ # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
+
+# This startupapicheck is a Helm post-install hook that waits for the webhook
+# endpoints to become available.
+# The check is implemented using a Kubernetes Job - if you are injecting mesh
+# sidecar proxies into cert-manager pods, you probably want to ensure that they
+# are not injected into this Job's pod. Otherwise the installation may time out
+# due to the Job never being completed because the sidecar proxy does not exit.
+# See https://github.com/cert-manager/cert-manager/pull/4414 for context.
+startupapicheck:
+ enabled: true
+
+ # Pod Security Context to be set on the startupapicheck component Pod
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+
+ # Container Security Context to be set on the controller component container
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+
+ # Timeout for 'kubectl check api' command
+ timeout: 1m
+
+ # Job backoffLimit
+ backoffLimit: 4
+
+ # Optional additional annotations to add to the startupapicheck Job
+ jobAnnotations:
+ helm.sh/hook: post-install
+ helm.sh/hook-weight: "1"
+ helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
+
+ # Optional additional annotations to add to the startupapicheck Pods
+ # podAnnotations: {}
+
+ # Additional command line flags to pass to startupapicheck binary.
+ # To see all available flags run docker run quay.io/jetstack/cert-manager-ctl:<version> --help
+ extraArgs: []
+
+ resources: {}
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ affinity: {}
+
+ tolerations: []
+
+ # Optional additional labels to add to the startupapicheck Pods
+ podLabels: {}
+
+ image:
+ repository: quay.io/jetstack/cert-manager-ctl
+ # You can manage a registry with
+ # registry: quay.io
+ # repository: jetstack/cert-manager-ctl
+
+ # Override the image tag to deploy by setting this variable.
+ # If no value is set, the chart's appVersion will be used.
+ # tag: canary
+
+ # Setting a digest will override any tag
+ # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
+
+ pullPolicy: IfNotPresent
+
+ rbac:
+ # annotations for the startup API Check job RBAC and PSP resources
+ annotations:
+ helm.sh/hook: post-install
+ helm.sh/hook-weight: "-5"
+ helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
+
+ # Automounting API credentials for a particular pod
+ # automountServiceAccountToken: true
+
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name: ""
+
+ # Optional additional annotations to add to the Job's ServiceAccount
+ annotations:
+ helm.sh/hook: post-install
+ helm.sh/hook-weight: "-5"
+ helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
+
+ # Automount API credentials for a Service Account.
+ automountServiceAccountToken: true
+
+ # Optional additional labels to add to the startupapicheck's ServiceAccount
+ # labels: {}
+
+ volumes: []
+ volumeMounts: []
diff --git a/charts/certificate-issuer-private/Chart.yaml b/charts/certificate-issuer-private/Chart.yaml
new file mode 100644
index 0000000..0d06818
--- /dev/null
+++ b/charts/certificate-issuer-private/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: certificate-issuer
+description: A Helm chart for PCloud public and private certificate issuer
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/certificate-issuer-private/templates/certificate-wildcard.yaml b/charts/certificate-issuer-private/templates/certificate-wildcard.yaml
new file mode 100644
index 0000000..b478cc3
--- /dev/null
+++ b/charts/certificate-issuer-private/templates/certificate-wildcard.yaml
@@ -0,0 +1,14 @@
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: wildcard-{{ .Values.issuer.domain }}
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/resource-policy: keep
+spec:
+ dnsNames:
+ - '*.{{ .Values.issuer.domain }}'
+ issuerRef:
+ name: {{ .Values.issuer.name }}
+ kind: Issuer
+ secretName: cert-wildcard.{{ .Values.issuer.domain }}
diff --git a/charts/certificate-issuer-private/templates/issuer.yaml b/charts/certificate-issuer-private/templates/issuer.yaml
new file mode 100644
index 0000000..bc29bed
--- /dev/null
+++ b/charts/certificate-issuer-private/templates/issuer.yaml
@@ -0,0 +1,19 @@
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ .Values.issuer.name }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ acme:
+ server: {{ .Values.issuer.server }}
+ email: {{ .Values.issuer.contactEmail }}
+ privateKeySecretRef:
+ name: issuer-{{ .Values.issuer.name }}-account-key
+ solvers:
+ - dns01:
+ webhook:
+ groupName: dodo.cloud # TODO(gio): configurable, this and one below
+ solverName: dns-resolver-pcloud
+ config:
+ createTXTAddr: {{ .Values.config.createTXTAddr }}
+ deleteTXTAddr: {{ .Values.config.deleteTXTAddr }}
diff --git a/charts/certificate-issuer-private/values.yaml b/charts/certificate-issuer-private/values.yaml
new file mode 100644
index 0000000..fd0d9bd
--- /dev/null
+++ b/charts/certificate-issuer-private/values.yaml
@@ -0,0 +1,9 @@
+issuer:
+ name: selfsigned-private
+ server: https://acme-v02.api.letsencrypt.org/directory
+ contactEmail: admin@example.com
+ gandiAPIToken: token
+ domain: p.example.com
+config:
+ createTXTAddr: http://10.44.0.1/create-txt-record
+ deleteTXTAddr: http://10.44.0.1/delete-txt-record
diff --git a/charts/certificate-issuer-public/Chart.yaml b/charts/certificate-issuer-public/Chart.yaml
new file mode 100644
index 0000000..0d06818
--- /dev/null
+++ b/charts/certificate-issuer-public/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: certificate-issuer
+description: A Helm chart for PCloud public and private certificate issuer
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/certificate-issuer-public/templates/issuer.yaml b/charts/certificate-issuer-public/templates/issuer.yaml
new file mode 100644
index 0000000..4bc338c
--- /dev/null
+++ b/charts/certificate-issuer-public/templates/issuer.yaml
@@ -0,0 +1,17 @@
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: {{ .Values.issuer.name }}
+ # Note: ClusterIssuer is cluster-scoped; metadata.namespace does not apply
+spec:
+ acme:
+ server: {{ .Values.issuer.server }}
+ email: {{ .Values.issuer.contactEmail }}
+ privateKeySecretRef:
+ name: cluster-issuer-{{ .Values.issuer.name }}-account-key
+ solvers:
+ - selector: {}
+ http01:
+ ingress:
+ class: {{ .Values.issuer.ingressClass }}
+ serviceType: ClusterIP
diff --git a/charts/certificate-issuer-public/values.yaml b/charts/certificate-issuer-public/values.yaml
new file mode 100644
index 0000000..cad147a
--- /dev/null
+++ b/charts/certificate-issuer-public/values.yaml
@@ -0,0 +1,6 @@
+issuer:
+ name: letsencrypt-prod
+ server: https://acme-v02.api.letsencrypt.org/directory
+ contactEmail: admin@example.com
+ ingressClass: ingress-nginx
+ domain: example.com
diff --git a/charts/coder/.helmignore b/charts/coder/.helmignore
new file mode 100644
index 0000000..957f15e
--- /dev/null
+++ b/charts/coder/.helmignore
@@ -0,0 +1,27 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+Makefile
+artifacthub-repo.yml
+
+tests
diff --git a/charts/coder/Chart.lock b/charts/coder/Chart.lock
new file mode 100644
index 0000000..9692722
--- /dev/null
+++ b/charts/coder/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: libcoder
+ repository: file://../libcoder
+ version: 0.1.0
+digest: sha256:5c9a99109258073b590a9f98268490ef387fde24c0c7c7ade9c1a8c7ef5e6e10
+generated: "2023-08-08T07:27:19.677972411Z"
diff --git a/charts/coder/Chart.yaml b/charts/coder/Chart.yaml
new file mode 100644
index 0000000..530fac7
--- /dev/null
+++ b/charts/coder/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v2
+appVersion: 2.13.2
+dependencies:
+- name: libcoder
+ repository: file://../libcoder
+ version: 0.1.0
+description: Remote development environments on your infrastructure
+home: https://github.com/coder/coder
+icon: https://helm.coder.com/coder_logo_black.png
+keywords:
+- coder
+- terraform
+kubeVersion: '>= 1.19.0-0'
+maintainers:
+- email: support@coder.com
+ name: Coder Technologies, Inc.
+ url: https://coder.com/contact
+name: coder
+sources:
+- https://github.com/coder/coder/tree/main/helm/coder
+type: application
+version: 2.13.2
diff --git a/charts/coder/README.md b/charts/coder/README.md
new file mode 100644
index 0000000..1a1933d
--- /dev/null
+++ b/charts/coder/README.md
@@ -0,0 +1,53 @@
+# Coder Helm Chart
+
+This directory contains the Helm chart used to deploy Coder onto a Kubernetes
+cluster. It contains the minimum required components to run Coder on Kubernetes,
+and notably (compared to Coder Classic) does not include a database server.
+
+## Getting Started
+
+> **Warning**: The main branch in this repository does not represent the
+> latest release of Coder. Please reference our installation docs for
+> instructions on a tagged release.
+
+View
+[our docs](https://coder.com/docs/coder-oss/latest/install/kubernetes)
+for detailed installation instructions.
+
+## Values
+
+Please refer to [values.yaml](values.yaml) for available Helm values and their
+defaults.
+
+A good starting point for your values file is:
+
+```yaml
+coder:
+ # You can specify any environment variables you'd like to pass to Coder
+ # here. Coder consumes environment variables listed in
+ # `coder server --help`, and these environment variables are also passed
+ # to the workspace provisioner (so you can consume them in your Terraform
+ # templates for auth keys etc.).
+ #
+ # Please keep in mind that you should not set `CODER_HTTP_ADDRESS`,
+ # `CODER_TLS_ENABLE`, `CODER_TLS_CERT_FILE` or `CODER_TLS_KEY_FILE` as
+ # they are already set by the Helm chart and will cause conflicts.
+ env:
+ - name: CODER_ACCESS_URL
+ value: "https://coder.example.com"
+ - name: CODER_PG_CONNECTION_URL
+ valueFrom:
+ secretKeyRef:
+ # You'll need to create a secret called coder-db-url with your
+ # Postgres connection URL like:
+ # postgres://coder:password@postgres:5432/coder?sslmode=disable
+ name: coder-db-url
+ key: url
+
+ # This env enables the Prometheus metrics endpoint.
+ - name: CODER_PROMETHEUS_ADDRESS
+ value: "0.0.0.0:2112"
+ tls:
+ secretNames:
+ - my-tls-secret-name
+```
diff --git a/charts/coder/charts/libcoder/Chart.yaml b/charts/coder/charts/libcoder/Chart.yaml
new file mode 100644
index 0000000..7cf6ff9
--- /dev/null
+++ b/charts/coder/charts/libcoder/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v2
+appVersion: 0.1.0
+description: Coder library chart
+home: https://github.com/coder/coder
+maintainers:
+- email: support@coder.com
+ name: Coder Technologies, Inc.
+ url: https://coder.com/contact
+name: libcoder
+type: library
+version: 0.1.0
diff --git a/charts/coder/charts/libcoder/templates/_coder.yaml b/charts/coder/charts/libcoder/templates/_coder.yaml
new file mode 100644
index 0000000..77cdbb2
--- /dev/null
+++ b/charts/coder/charts/libcoder/templates/_coder.yaml
@@ -0,0 +1,85 @@
+{{- define "libcoder.deployment.tpl" -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "coder.name" .}}
+ labels:
+ {{- include "coder.labels" . | nindent 4 }}
+ {{- with .Values.coder.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ annotations: {{ toYaml .Values.coder.annotations | nindent 4}}
+spec:
+ replicas: {{ .Values.coder.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "coder.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "coder.labels" . | nindent 8 }}
+ {{- with .Values.coder.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- toYaml .Values.coder.podAnnotations | nindent 8 }}
+ spec:
+ serviceAccountName: {{ .Values.coder.serviceAccount.name | quote }}
+ restartPolicy: Always
+ {{- with .Values.coder.image.pullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 60
+ {{- with .Values.coder.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.coder.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.coder.nodeSelector }}
+ nodeSelector:
+ {{ toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.coder.initContainers }}
+ initContainers:
+ {{ toYaml . | nindent 8 }}
+ {{- end }}
+ containers: []
+ {{- include "coder.volumes" . | nindent 6 }}
+{{- end -}}
+{{- define "libcoder.deployment" -}}
+{{- include "libcoder.util.merge" (append . "libcoder.deployment.tpl") -}}
+{{- end -}}
+
+{{- define "libcoder.containerspec.tpl" -}}
+name: coder
+image: {{ include "coder.image" . | quote }}
+imagePullPolicy: {{ .Values.coder.image.pullPolicy }}
+command:
+ {{- toYaml .Values.coder.command | nindent 2 }}
+resources:
+ {{- toYaml .Values.coder.resources | nindent 2 }}
+lifecycle:
+ {{- toYaml .Values.coder.lifecycle | nindent 2 }}
+securityContext: {{ toYaml .Values.coder.securityContext | nindent 2 }}
+{{ include "coder.volumeMounts" . }}
+{{- end -}}
+{{- define "libcoder.containerspec" -}}
+{{- include "libcoder.util.merge" (append . "libcoder.containerspec.tpl") -}}
+{{- end -}}
+
+{{- define "libcoder.serviceaccount.tpl" -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.coder.serviceAccount.name | quote }}
+ annotations: {{ toYaml .Values.coder.serviceAccount.annotations | nindent 4 }}
+ labels:
+ {{- include "coder.labels" . | nindent 4 }}
+{{- end -}}
+{{- define "libcoder.serviceaccount" -}}
+{{- include "libcoder.util.merge" (append . "libcoder.serviceaccount.tpl") -}}
+{{- end -}}
diff --git a/charts/coder/charts/libcoder/templates/_helpers.tpl b/charts/coder/charts/libcoder/templates/_helpers.tpl
new file mode 100644
index 0000000..9a6c5df
--- /dev/null
+++ b/charts/coder/charts/libcoder/templates/_helpers.tpl
@@ -0,0 +1,200 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "coder.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "coder.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Selector labels
+
+!!!!! DO NOT ADD ANY MORE SELECTORS. IT IS A BREAKING CHANGE !!!!!
+*/}}
+{{- define "coder.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "coder.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "coder.labels" -}}
+helm.sh/chart: {{ include "coder.chart" . }}
+{{ include "coder.selectorLabels" . }}
+app.kubernetes.io/part-of: {{ include "coder.name" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Coder Docker image URI
+*/}}
+{{- define "coder.image" -}}
+{{- if and (eq .Values.coder.image.tag "") (eq .Chart.AppVersion "0.1.0") -}}
+{{ fail "You must specify the coder.image.tag value if you're installing the Helm chart directly from Git." }}
+{{- end -}}
+{{ .Values.coder.image.repo }}:{{ .Values.coder.image.tag | default (printf "v%v" .Chart.AppVersion) }}
+{{- end }}
+
+{{/*
+Coder TLS enabled.
+*/}}
+{{- define "coder.tlsEnabled" -}}
+ {{- if hasKey .Values.coder "tls" -}}
+ {{- if .Values.coder.tls.secretNames -}}
+ true
+ {{- else -}}
+ false
+ {{- end -}}
+ {{- else -}}
+ false
+ {{- end -}}
+{{- end }}
+
+{{/*
+Coder TLS environment variables.
+*/}}
+{{- define "coder.tlsEnv" }}
+{{- if eq (include "coder.tlsEnabled" .) "true" }}
+- name: CODER_TLS_ENABLE
+ value: "true"
+- name: CODER_TLS_ADDRESS
+ value: "0.0.0.0:8443"
+- name: CODER_TLS_CERT_FILE
+ value: "{{ range $idx, $secretName := .Values.coder.tls.secretNames -}}{{ if $idx }},{{ end }}/etc/ssl/certs/coder/{{ $secretName }}/tls.crt{{- end }}"
+- name: CODER_TLS_KEY_FILE
+ value: "{{ range $idx, $secretName := .Values.coder.tls.secretNames -}}{{ if $idx }},{{ end }}/etc/ssl/certs/coder/{{ $secretName }}/tls.key{{- end }}"
+{{- end }}
+{{- end }}
+
+{{/*
+Coder default access URL
+*/}}
+{{- define "coder.defaultAccessURL" }}
+{{- if eq (include "coder.tlsEnabled" .) "true" -}}
+https
+{{- else -}}
+http
+{{- end -}}
+://coder.{{ .Release.Namespace }}.svc.cluster.local
+{{- end }}
+
+{{/*
+Coder volume definitions.
+*/}}
+{{- define "coder.volumeList" }}
+{{- if hasKey .Values.coder "tls" -}}
+{{- range $secretName := .Values.coder.tls.secretNames }}
+- name: "tls-{{ $secretName }}"
+ secret:
+ secretName: {{ $secretName | quote }}
+{{ end -}}
+{{- end }}
+{{ range $secret := .Values.coder.certs.secrets -}}
+- name: "ca-cert-{{ $secret.name }}"
+ secret:
+ secretName: {{ $secret.name | quote }}
+{{ end -}}
+{{ if gt (len .Values.coder.volumes) 0 -}}
+{{ toYaml .Values.coder.volumes }}
+{{ end -}}
+{{- end }}
+
+{{/*
+Coder volumes yaml.
+*/}}
+{{- define "coder.volumes" }}
+{{- if trim (include "coder.volumeList" .) -}}
+volumes:
+{{- include "coder.volumeList" . -}}
+{{- else -}}
+volumes: []
+{{- end -}}
+{{- end }}
+
+{{/*
+Coder volume mounts.
+*/}}
+{{- define "coder.volumeMountList" }}
+{{- if hasKey .Values.coder "tls" }}
+{{ range $secretName := .Values.coder.tls.secretNames -}}
+- name: "tls-{{ $secretName }}"
+ mountPath: "/etc/ssl/certs/coder/{{ $secretName }}"
+ readOnly: true
+{{ end -}}
+{{- end }}
+{{ range $secret := .Values.coder.certs.secrets -}}
+- name: "ca-cert-{{ $secret.name }}"
+ mountPath: "/etc/ssl/certs/{{ $secret.name }}.crt"
+ subPath: {{ $secret.key | quote }}
+ readOnly: true
+{{ end -}}
+{{ if gt (len .Values.coder.volumeMounts) 0 -}}
+{{ toYaml .Values.coder.volumeMounts }}
+{{ end -}}
+{{- end }}
+
+{{/*
+Coder volume mounts yaml.
+*/}}
+{{- define "coder.volumeMounts" }}
+{{- if trim (include "coder.volumeMountList" .) -}}
+volumeMounts:
+{{- include "coder.volumeMountList" . -}}
+{{- else -}}
+volumeMounts: []
+{{- end -}}
+{{- end }}
+
+{{/*
+Coder ingress wildcard hostname with the wildcard suffix stripped.
+*/}}
+{{- define "coder.ingressWildcardHost" -}}
+{{/* This regex replace is required as the original input including the suffix
+ * is not a legal ingress host. We need to remove the suffix and keep the
+ * wildcard '*'.
+ *
+ * - '\\*' Starts with '*'
+ * - '[^.]*' Suffix is 0 or more characters, '-suffix'
+ * - '(' Start domain capture group
+ * - '\\.' The domain should be separated with a '.' from the subdomain
+ * - '.*' Rest of the domain.
+ * - ')' $1 is the ''.example.com'
+ */}}
+{{- regexReplaceAll "\\*[^.]*(\\..*)" .Values.coder.ingress.wildcardHost "*${1}" -}}
+{{- end }}
+
+{{/*
+Fail on fully deprecated values or deprecated value combinations. This is
+included at the top of coder.yaml.
+*/}}
+{{- define "coder.verifyDeprecated" }}
+{{/*
+Deprecated value coder.tls.secretName must not be used.
+*/}}
+{{- if .Values.coder.tls.secretName }}
+{{ fail "coder.tls.secretName is deprecated, use coder.tls.secretNames instead." }}
+{{- end }}
+{{- end }}
+
+{{/*
+Renders a value that contains a template.
+Usage:
+{{ include "coder.renderTemplate" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "coder.renderTemplate" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/coder/charts/libcoder/templates/_rbac.yaml b/charts/coder/charts/libcoder/templates/_rbac.yaml
new file mode 100644
index 0000000..1320c65
--- /dev/null
+++ b/charts/coder/charts/libcoder/templates/_rbac.yaml
@@ -0,0 +1,62 @@
+{{- define "libcoder.rbac.tpl" -}}
+{{- if .Values.coder.serviceAccount.workspacePerms }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ .Values.coder.serviceAccount.name }}-workspace-perms
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+{{- if .Values.coder.serviceAccount.enableDeployments }}
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+{{- end }}
+{{- with .Values.coder.serviceAccount.extraRules }}
+{{ toYaml . | nindent 2 }}
+{{- end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ .Values.coder.serviceAccount.name | quote }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.coder.serviceAccount.name | quote }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ .Values.coder.serviceAccount.name }}-workspace-perms
+{{- end }}
+{{- end -}}
diff --git a/charts/coder/charts/libcoder/templates/_util.yaml b/charts/coder/charts/libcoder/templates/_util.yaml
new file mode 100644
index 0000000..ebdc13e
--- /dev/null
+++ b/charts/coder/charts/libcoder/templates/_util.yaml
@@ -0,0 +1,13 @@
+{{- /*
+ libcoder.util.merge will merge two YAML templates and output the result.
+ This takes an array of three values:
+ - the top context
+ - the template name of the overrides (destination)
+ - the template name of the base (source)
+*/}}
+{{- define "libcoder.util.merge" -}}
+{{- $top := first . -}}
+{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
+{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
+{{- toYaml (merge $overrides $tpl) -}}
+{{- end -}}
diff --git a/charts/coder/templates/NOTES.txt b/charts/coder/templates/NOTES.txt
new file mode 100644
index 0000000..18fc33c
--- /dev/null
+++ b/charts/coder/templates/NOTES.txt
@@ -0,0 +1,6 @@
+{{/*
+Deprecation notices:
+*/}}
+
+Enjoy Coder! Please create an issue at https://github.com/coder/coder if you run
+into any problems! :)
diff --git a/charts/coder/templates/_coder.tpl b/charts/coder/templates/_coder.tpl
new file mode 100644
index 0000000..d0846ec
--- /dev/null
+++ b/charts/coder/templates/_coder.tpl
@@ -0,0 +1,108 @@
+{{/*
+Service account to merge into the libcoder template
+*/}}
+{{- define "coder.serviceaccount" -}}
+{{- end -}}
+
+{{/*
+Deployment to merge into the libcoder template
+*/}}
+{{- define "coder.deployment" -}}
+spec:
+ template:
+ spec:
+ containers:
+ -
+{{ include "libcoder.containerspec" (list . "coder.containerspec") | indent 8}}
+
+{{- end -}}
+
+{{/*
+ContainerSpec for the Coder container of the Coder deployment
+*/}}
+{{- define "coder.containerspec" -}}
+args:
+{{- if .Values.coder.commandArgs }}
+ {{- toYaml .Values.coder.commandArgs | nindent 12 }}
+{{- else }}
+ {{- if .Values.coder.workspaceProxy }}
+- wsproxy
+ {{- end }}
+- server
+{{- end }}
+{{- if .Values.coder.envFrom }}
+envFrom:
+{{- with .Values.coder.envFrom }}
+{{ toYaml . }}
+{{- end }}
+{{- end }}
+env:
+- name: CODER_HTTP_ADDRESS
+ value: "0.0.0.0:8080"
+- name: CODER_PROMETHEUS_ADDRESS
+ value: "0.0.0.0:2112"
+{{- if .Values.provisionerDaemon.pskSecretName }}
+- name: CODER_PROVISIONER_DAEMON_PSK
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
+ key: psk
+{{- end }}
+ # Set the default access URL so a `helm apply` works by default.
+ # See: https://github.com/coder/coder/issues/5024
+{{- $hasAccessURL := false }}
+{{- range .Values.coder.env }}
+{{- if eq .name "CODER_ACCESS_URL" }}
+{{- $hasAccessURL = true }}
+{{- end }}
+{{- end }}
+{{- if and (not $hasAccessURL) .Values.coder.envUseClusterAccessURL }}
+- name: CODER_ACCESS_URL
+ value: {{ include "coder.defaultAccessURL" . | quote }}
+{{- end }}
+# Used for inter-pod communication with high-availability.
+- name: KUBE_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+- name: CODER_DERP_SERVER_RELAY_URL
+ value: "http://$(KUBE_POD_IP):8080"
+{{- include "coder.tlsEnv" . }}
+{{- with .Values.coder.env }}
+{{ toYaml . }}
+{{- end }}
+ports:
+- name: "http"
+ containerPort: 8080
+ protocol: TCP
+ {{- if eq (include "coder.tlsEnabled" .) "true" }}
+- name: "https"
+ containerPort: 8443
+ protocol: TCP
+ {{- end }}
+ {{- range .Values.coder.env }}
+ {{- if eq .name "CODER_PROMETHEUS_ENABLE" }}
+ {{/*
+ This sadly has to be nested to avoid evaluating the second part
+ of the condition too early and potentially getting type errors if
+ the value is not a string (like a `valueFrom`). We do not support
+ `valueFrom` for this env var specifically.
+ */}}
+ {{- if eq .value "true" }}
+- name: "prometheus-http"
+ containerPort: 2112
+ protocol: TCP
+ {{- end }}
+ {{- end }}
+ {{- end }}
+readinessProbe:
+ httpGet:
+ path: /healthz
+ port: "http"
+ scheme: "HTTP"
+livenessProbe:
+ httpGet:
+ path: /healthz
+ port: "http"
+ scheme: "HTTP"
+{{- end }}
diff --git a/charts/coder/templates/coder.yaml b/charts/coder/templates/coder.yaml
new file mode 100644
index 0000000..65eaac0
--- /dev/null
+++ b/charts/coder/templates/coder.yaml
@@ -0,0 +1,5 @@
+---
+{{ include "libcoder.serviceaccount" (list . "coder.serviceaccount") }}
+
+---
+{{ include "libcoder.deployment" (list . "coder.deployment") }}
diff --git a/charts/coder/templates/extra-templates.yaml b/charts/coder/templates/extra-templates.yaml
new file mode 100644
index 0000000..e047658
--- /dev/null
+++ b/charts/coder/templates/extra-templates.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraTemplates }}
+---
+{{ include "coder.renderTemplate" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/charts/coder/templates/ingress.yaml b/charts/coder/templates/ingress.yaml
new file mode 100644
index 0000000..7dd2a13
--- /dev/null
+++ b/charts/coder/templates/ingress.yaml
@@ -0,0 +1,54 @@
+
+{{- if .Values.coder.ingress.enable }}
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: coder
+ labels:
+ {{- include "coder.labels" . | nindent 4 }}
+ annotations:
+ {{- toYaml .Values.coder.ingress.annotations | nindent 4 }}
+spec:
+ {{- if .Values.coder.ingress.className }}
+ {{/* If this is set to an empty string it fails validation on K8s */}}
+ ingressClassName: {{ .Values.coder.ingress.className | quote }}
+ {{- end }}
+
+ rules:
+ - host: {{ .Values.coder.ingress.host | quote }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: coder
+ port:
+ name: "http"
+
+ {{- if .Values.coder.ingress.wildcardHost }}
+ - host: {{ include "coder.ingressWildcardHost" . | quote }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: coder
+ port:
+ name: "http"
+ {{- end }}
+
+ {{- if .Values.coder.ingress.tls.enable }}
+ tls:
+ - hosts:
+ - {{ .Values.coder.ingress.host | quote }}
+ secretName: {{ .Values.coder.ingress.tls.secretName | quote}}
+ {{- if .Values.coder.ingress.tls.wildcardSecretName }}
+ - hosts:
+ - {{ include "coder.ingressWildcardHost" . | quote }}
+ secretName: {{ .Values.coder.ingress.tls.wildcardSecretName | quote}}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/coder/templates/rbac.yaml b/charts/coder/templates/rbac.yaml
new file mode 100644
index 0000000..07fb36d
--- /dev/null
+++ b/charts/coder/templates/rbac.yaml
@@ -0,0 +1 @@
+{{ include "libcoder.rbac.tpl" . }}
diff --git a/charts/coder/templates/service.yaml b/charts/coder/templates/service.yaml
new file mode 100644
index 0000000..1881f99
--- /dev/null
+++ b/charts/coder/templates/service.yaml
@@ -0,0 +1,41 @@
+{{- if .Values.coder.service.enable }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: coder
+ labels:
+ {{- include "coder.labels" . | nindent 4 }}
+ annotations:
+ {{- toYaml .Values.coder.service.annotations | nindent 4 }}
+spec:
+ type: {{ .Values.coder.service.type }}
+ sessionAffinity: {{ .Values.coder.service.sessionAffinity }}
+ ports:
+ - name: "http"
+ port: 80
+ targetPort: "http"
+ protocol: TCP
+ {{ if eq .Values.coder.service.type "NodePort" }}
+ nodePort: {{ .Values.coder.service.httpNodePort }}
+ {{ end }}
+ {{- if eq (include "coder.tlsEnabled" .) "true" }}
+ - name: "https"
+ port: 443
+ targetPort: "https"
+ protocol: TCP
+ {{ if eq .Values.coder.service.type "NodePort" }}
+ nodePort: {{ .Values.coder.service.httpsNodePort }}
+ {{ end }}
+ {{- end }}
+ {{- if eq "LoadBalancer" .Values.coder.service.type }}
+ {{- with .Values.coder.service.loadBalancerIP }}
+ loadBalancerIP: {{ . | quote }}
+ {{- end }}
+ {{- with .Values.coder.service.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ selector:
+ {{- include "coder.selectorLabels" . | nindent 4 }}
+{{- end }}
diff --git a/charts/coder/values.yaml b/charts/coder/values.yaml
new file mode 100644
index 0000000..dcf32c6
--- /dev/null
+++ b/charts/coder/values.yaml
@@ -0,0 +1,344 @@
+# coder -- Primary configuration for `coder server`.
+coder:
+ # coder.env -- The environment variables to set for Coder. These can be used
+ # to configure all aspects of `coder server`. Please see `coder server --help`
+ # for information about what environment variables can be set.
+ # Note: The following environment variables are set by default and cannot be
+ # overridden:
+ # - CODER_HTTP_ADDRESS: set to 0.0.0.0:8080 and cannot be changed.
+ # - CODER_TLS_ADDRESS: set to 0.0.0.0:8443 if tls.secretName is not empty.
+ # - CODER_TLS_ENABLE: set if tls.secretName is not empty.
+ # - CODER_TLS_CERT_FILE: set if tls.secretName is not empty.
+ # - CODER_TLS_KEY_FILE: set if tls.secretName is not empty.
+ # - CODER_PROMETHEUS_ADDRESS: set to 0.0.0.0:2112 and cannot be changed.
+ # Prometheus must still be enabled by setting CODER_PROMETHEUS_ENABLE.
+ # - KUBE_POD_IP
+ # - CODER_DERP_SERVER_RELAY_URL
+ #
+ # We will additionally set CODER_ACCESS_URL if unset to the cluster service
+ # URL, unless coder.envUseClusterAccessURL is set to false.
+ env: []
+ # - name: "CODER_ACCESS_URL"
+ # value: "https://coder.example.com"
+
+ # coder.envFrom -- Secrets or ConfigMaps to use for Coder's environment
+ # variables. If you want one environment variable read from a secret, then use
+ # coder.env valueFrom. See the K8s docs for valueFrom here:
+ # https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#define-container-environment-variables-using-secret-data
+ #
+ # If setting CODER_ACCESS_URL in coder.envFrom, then you must set
+ # coder.envUseClusterAccessURL to false.
+ envFrom: []
+ # - configMapRef:
+ # name: coder-config
+ # - secretRef:
+ # name: coder-config
+
+ # coder.envUseClusterAccessURL -- Determines whether the CODER_ACCESS_URL env
+ # is added to coder.env if it's not already set there. Set this to false if
+ # defining CODER_ACCESS_URL in coder.envFrom to avoid conflicts.
+ envUseClusterAccessURL: true
+
+ # coder.image -- The image to use for Coder.
+ image:
+ # coder.image.repo -- The repository of the image.
+ repo: "ghcr.io/coder/coder"
+ # coder.image.tag -- The tag of the image, defaults to {{.Chart.AppVersion}}
+ # if not set. If you're using the chart directly from git, the default
+ # app version will not work and you'll need to set this value. The helm
+ # chart helpfully fails quickly in this case.
+ tag: ""
+ # coder.image.pullPolicy -- The pull policy to use for the image. See:
+ # https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
+ pullPolicy: IfNotPresent
+ # coder.image.pullSecrets -- The secrets used for pulling the Coder image from
+ # a private registry.
+ pullSecrets: []
+ # - name: "pull-secret"
+
+ # coder.initContainers -- Init containers for the deployment. See:
+ # https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ initContainers:
+ []
+ # - name: init-container
+ # image: busybox:1.28
+ # command: ['sh', '-c', "sleep 2"]
+
+ # coder.annotations -- The Deployment annotations. See:
+ # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ annotations: {}
+
+ # coder.labels -- The Deployment labels. See:
+ # https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ labels: {}
+
+ # coder.podAnnotations -- The Coder pod annotations. See:
+ # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ podAnnotations: {}
+
+ # coder.podLabels -- The Coder pod labels. See:
+ # https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ podLabels: {}
+
+ # coder.serviceAccount -- Configuration for the automatically created service
+ # account. Creation of the service account cannot be disabled.
+ serviceAccount:
+ # coder.serviceAccount.workspacePerms -- Whether or not to grant the coder
+ # service account permissions to manage workspaces. This includes
+ # permission to manage pods and persistent volume claims in the deployment
+ # namespace.
+ #
+ # It is recommended to keep this on if you are using Kubernetes templates
+ # within Coder.
+ workspacePerms: true
+ # coder.serviceAccount.enableDeployments -- Provides the service account
+ # permission to manage Kubernetes deployments. Depends on workspacePerms.
+ enableDeployments: true
+ # coder.serviceAccount.extraRules -- Additional permissions added to the SA
+ # role. Depends on workspacePerms.
+ extraRules: []
+ # - apiGroups: [""]
+ # resources: ["services"]
+ # verbs:
+ # - create
+ # - delete
+ # - deletecollection
+ # - get
+ # - list
+ # - patch
+ # - update
+ # - watch
+
+ # coder.serviceAccount.annotations -- The Coder service account annotations.
+ annotations: {}
+ # coder.serviceAccount.name -- The service account name
+ name: coder
+
+ # coder.securityContext -- Fields related to the container's security
+ # context (as opposed to the pod). Some fields are also present in the pod
+ # security context, in which case these values will take precedence.
+ securityContext:
+ # coder.securityContext.runAsNonRoot -- Requires that the coder container
+ # runs as an unprivileged user. If setting runAsUser to 0 (root), this
+ # will need to be set to false.
+ runAsNonRoot: true
+ # coder.securityContext.runAsUser -- Sets the user id of the container.
+ # For security reasons, we recommend using a non-root user.
+ runAsUser: 1000
+ # coder.securityContext.runAsGroup -- Sets the group id of the container.
+ # For security reasons, we recommend using a non-root group.
+ runAsGroup: 1000
+ # coder.securityContext.readOnlyRootFilesystem -- Mounts the container's
+ # root filesystem as read-only.
+ readOnlyRootFilesystem: null
+ # coder.securityContext.seccompProfile -- Sets the seccomp profile for
+ # the coder container.
+ seccompProfile:
+ type: RuntimeDefault
+ # coder.securityContext.allowPrivilegeEscalation -- Controls whether
+ # the container can gain additional privileges, such as escalating to
+ # root. It is recommended to leave this setting disabled in production.
+ allowPrivilegeEscalation: false
+
+ # coder.volumes -- A list of extra volumes to add to the Coder pod.
+ volumes: []
+ # - name: "my-volume"
+ # emptyDir: {}
+
+ # coder.volumeMounts -- A list of extra volume mounts to add to the Coder pod.
+ volumeMounts: []
+ # - name: "my-volume"
+ # mountPath: "/mnt/my-volume"
+
+ # coder.tls -- The TLS configuration for Coder.
+ tls:
+ # coder.tls.secretNames -- A list of TLS server certificate secrets to mount
+ # into the Coder pod. The secrets should exist in the same namespace as the
+ # Helm deployment and should be of type "kubernetes.io/tls". The secrets
+ # will be automatically mounted into the pod if specified, and the correct
+ # "CODER_TLS_*" environment variables will be set for you.
+ secretNames: []
+
+ # coder.replicaCount -- The number of Kubernetes deployment replicas. This
+ # should only be increased if High Availability is enabled.
+ #
+ # This is an Enterprise feature. Contact sales@coder.com.
+ replicaCount: 1
+
+ # coder.workspaceProxy -- Whether or not this deployment of Coder is a Coder
+ # Workspace Proxy. Workspace Proxies reduce the latency between the user and
+ # their workspace for web connections (workspace apps and web terminal) and
+ # proxied connections from the CLI. Workspace Proxies are optional and only
+ # recommended for geographically sparse teams.
+ #
+ # Make sure you set CODER_PRIMARY_ACCESS_URL and CODER_PROXY_SESSION_TOKEN in
+ # the environment below. You can get a proxy token using the CLI:
+ # coder wsproxy create \
+ # --name "proxy-name" \
+ # --display-name "Proxy Name" \
+ # --icon "/emojis/xyz.png"
+ #
+ # This is an Enterprise feature. Contact sales@coder.com
+ # Docs: https://coder.com/docs/v2/latest/admin/workspace-proxies
+ workspaceProxy: false
+
+ # coder.lifecycle -- container lifecycle handlers for the Coder container, allowing
+ # for lifecycle events such as postStart and preStop events
+ # See: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
+ lifecycle:
+ {}
+ # postStart:
+ # exec:
+ # command: ["/bin/sh", "-c", "echo postStart"]
+ # preStop:
+ # exec:
+  #     command: ["/bin/sh","-c","echo preStop"]
+
+ # coder.resources -- The resources to request for Coder. These are optional
+ # and are not set by default.
+ resources:
+ {}
+ # limits:
+ # cpu: 2000m
+ # memory: 4096Mi
+ # requests:
+ # cpu: 2000m
+ # memory: 4096Mi
+
+ # coder.certs -- CA bundles to mount inside the Coder pod.
+ certs:
+ # coder.certs.secrets -- A list of CA bundle secrets to mount into the Coder
+ # pod. The secrets should exist in the same namespace as the Helm
+ # deployment.
+ #
+ # The given key in each secret is mounted at
+ # `/etc/ssl/certs/{secret_name}.crt`.
+ secrets:
+ []
+ # - name: "my-ca-bundle"
+ # key: "ca-bundle.crt"
+
+ # coder.affinity -- Allows specifying an affinity rule for the `coder` deployment.
+ # The default rule prefers to schedule coder pods on different
+ # nodes, which is only applicable if coder.replicaCount is greater than 1.
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/instance
+ operator: In
+ values:
+ - "coder"
+ topologyKey: kubernetes.io/hostname
+ weight: 1
+
+ # coder.tolerations -- Tolerations for tainted nodes.
+ # See: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ tolerations:
+    []
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+
+ # coder.nodeSelector -- Node labels for constraining coder pods to nodes.
+ # See: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # kubernetes.io/os: linux
+
+ # coder.service -- The Service object to expose for Coder.
+ service:
+ # coder.service.enable -- Whether to create the Service object.
+ enable: true
+ # coder.service.type -- The type of service to expose. See:
+ # https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: LoadBalancer
+ # coder.service.sessionAffinity -- Must be set to ClientIP or None
+ # AWS ELB does not support session stickiness based on ClientIP, so you must set this to None.
+ # The error message you might see: "Unsupported load balancer affinity: ClientIP"
+ # https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity
+ sessionAffinity: None
+ # coder.service.externalTrafficPolicy -- The external traffic policy to use.
+ # You may need to change this to "Local" to preserve the source IP address
+ # in some situations.
+ # https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ externalTrafficPolicy: Cluster
+ # coder.service.loadBalancerIP -- The IP address of the LoadBalancer. If not
+ # specified, a new IP will be generated each time the load balancer is
+ # recreated. It is recommended to manually create a static IP address in
+ # your cloud and specify it here in production to avoid accidental IP
+ # address changes.
+ loadBalancerIP: ""
+ # coder.service.annotations -- The service annotations. See:
+ # https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ annotations: {}
+ # coder.service.httpNodePort -- Enabled if coder.service.type is set to
+ # NodePort. If not set, Kubernetes will allocate a port from the default
+ # range, 30000-32767.
+ httpNodePort: ""
+ # coder.service.httpsNodePort -- Enabled if coder.service.type is set to
+ # NodePort. If not set, Kubernetes will allocate a port from the default
+ # range, 30000-32767.
+ httpsNodePort: ""
+
+ # coder.ingress -- The Ingress object to expose for Coder.
+ ingress:
+ # coder.ingress.enable -- Whether to create the Ingress object. If using an
+ # Ingress, we recommend not specifying coder.tls.secretNames as the Ingress
+ # will handle TLS termination.
+ enable: false
+ # coder.ingress.className -- The name of the Ingress class to use.
+ className: ""
+ # coder.ingress.host -- The hostname to match on.
+ # Be sure to also set CODER_ACCESS_URL within coder.env[]
+ host: ""
+ # coder.ingress.wildcardHost -- The wildcard hostname to match on. Should be
+ # in the form "*.example.com" or "*-suffix.example.com". If you are using a
+ # suffix after the wildcard, the suffix will be stripped from the created
+ # ingress to ensure that it is a legal ingress host. Optional if not using
+ # applications over subdomains.
+ # Be sure to also set CODER_WILDCARD_ACCESS_URL within coder.env[]
+ wildcardHost: ""
+ # coder.ingress.annotations -- The ingress annotations.
+ annotations: {}
+ # coder.ingress.tls -- The TLS configuration to use for the Ingress.
+ tls:
+ # coder.ingress.tls.enable -- Whether to enable TLS on the Ingress.
+ enable: false
+ # coder.ingress.tls.secretName -- The name of the TLS secret to use.
+ secretName: ""
+ # coder.ingress.tls.wildcardSecretName -- The name of the TLS secret to
+ # use for the wildcard host.
+ wildcardSecretName: ""
+
+ # coder.command -- The command to use when running the Coder container. Used
+ # for customizing the location of the `coder` binary in your image.
+ command:
+ - /opt/coder
+
+ # coder.commandArgs -- Set arguments for the entrypoint command of the Coder pod.
+ commandArgs: []
+
+# provisionerDaemon -- Configuration for external provisioner daemons.
+#
+# This is an Enterprise feature. Contact sales@coder.com.
+provisionerDaemon:
+ # provisionerDaemon.pskSecretName -- The name of the Kubernetes secret that contains the
+ # Pre-Shared Key (PSK) to use to authenticate external provisioner daemons with Coder. The
+ # secret must be in the same namespace as the Helm deployment, and contain an item called "psk"
+ # which contains the pre-shared key.
+ pskSecretName: ""
+
+# extraTemplates -- Array of extra objects to deploy with the release. Strings
+# are evaluated as a template and can use template expansions and functions. All
+# other objects are used as yaml.
+extraTemplates:
+ #- |
+ # apiVersion: v1
+ # kind: ConfigMap
+ # metadata:
+ # name: my-configmap
+ # data:
+ # key: {{ .Values.myCustomValue | quote }}
diff --git a/charts/coredns/.helmignore b/charts/coredns/.helmignore
new file mode 100644
index 0000000..7c04072
--- /dev/null
+++ b/charts/coredns/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
diff --git a/charts/coredns/Chart.yaml b/charts/coredns/Chart.yaml
new file mode 100644
index 0000000..9cb4c9c
--- /dev/null
+++ b/charts/coredns/Chart.yaml
@@ -0,0 +1,24 @@
+annotations:
+ artifacthub.io/changes: |
+ - kind: added
+ description: Added option to override defaultMode for extraSecrets
+apiVersion: v2
+appVersion: 1.11.1
+description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS
+ Services
+home: https://coredns.io
+icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png
+keywords:
+- coredns
+- dns
+- kubedns
+maintainers:
+- name: mrueg
+- name: haad
+- name: hagaibarel
+- name: shubham-cmyk
+name: coredns
+sources:
+- https://github.com/coredns/coredns
+type: application
+version: 1.28.1
diff --git a/charts/coredns/README.md b/charts/coredns/README.md
new file mode 100644
index 0000000..b98c4a2
--- /dev/null
+++ b/charts/coredns/README.md
@@ -0,0 +1,248 @@
+# CoreDNS
+
+[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services
+
+# TL;DR;
+
+```console
+$ helm repo add coredns https://coredns.github.io/helm
+$ helm --namespace=kube-system install coredns coredns/coredns
+```
+
+## Introduction
+
+This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configurations to support the various scenarios listed below:
+
+- CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. This mode is chosen by setting `isClusterService` to true.
+- CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in a user specified namespace. The CoreDNS service can be exposed outside the cluster by using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false.
+- CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses the etcd plugin for the CoreDNS backend. This deployment mode has a dependency on the `etcd-operator` chart, which needs to be pre-installed.
+
+## Prerequisites
+
+- Kubernetes 1.10 or later
+
+## Installing the Chart
+
+The chart can be installed as follows:
+
+```console
+$ helm repo add coredns https://coredns.github.io/helm
+$ helm --namespace=kube-system install coredns coredns/coredns
+```
+
+The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment.
+
+> **Tip**: List all releases using `helm list --all-namespaces`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `coredns` deployment:
+
+```console
+$ helm uninstall coredns
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+| Parameter | Description | Default |
+| :--------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------- |
+| `image.repository` | The image repository to pull from | coredns/coredns |
+| `image.tag` | The image tag to pull from (derived from Chart.yaml) | `` |
+| `image.pullPolicy` | Image pull policy | IfNotPresent |
+| `image.pullSecrets` | Specify container image pull secrets | `[]` |
+| `replicaCount` | Number of replicas | 1 |
+| `resources.limits.cpu` | Container maximum CPU | `100m` |
+| `resources.limits.memory` | Container maximum memory | `128Mi` |
+| `resources.requests.cpu` | Container requested CPU | `100m` |
+| `resources.requests.memory` | Container requested memory | `128Mi` |
+| `serviceType` | Kubernetes Service type | `ClusterIP` |
+| `prometheus.service.enabled` | Set this to `true` to create Service for Prometheus metrics | `false` |
+| `prometheus.service.annotations` | Annotations to add to the metrics Service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}` |
+| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
+| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} |
+| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. | `""` |
+| `prometheus.monitor.interval` | Scrape interval for polling the metrics endpoint. (E.g. "30s") | `""` |
+| `service.clusterIP` | IP address to assign to service | `""` |
+| `service.clusterIPs` | IP addresses to assign to service | `[]` |
+| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` |
+| `service.externalIPs` | External IP addresses | [] |
+| `service.externalTrafficPolicy` | Enable client source IP preservation | [] |
+| `service.ipFamilyPolicy` | Service dual-stack policy | `""` |
+| `service.annotations` | Annotations to add to service | {} |
+| `serviceAccount.create` | If true, create & use serviceAccount | false |
+| `serviceAccount.name` | If not set & create is true, use template fullname | |
+| `rbac.create` | If true, create & use RBAC resources | true |
+| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` |
+| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. | true |
+| `priorityClassName` | Name of Priority Class to assign pods | `""` |
+| `securityContext` | securityContext definition for pods | capabilities.add.NET_BIND_SERVICE |
+| `servers` | Configuration for CoreDNS and plugins | See values.yml |
+| `livenessProbe.enabled` | Enable/disable the Liveness probe | `true` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `affinity` | Affinity settings for pod assignment | {} |
+| `nodeSelector` | Node labels for pod assignment | {} |
+| `tolerations` | Tolerations for pod assignment | [] |
+| `zoneFiles` | Configure custom Zone files | [] |
+| `extraContainers` | Optional array of sidecar containers | [] |
+| `extraVolumes` | Optional array of volumes to create | [] |
+| `extraVolumeMounts` | Optional array of volumes to mount inside the CoreDNS container | [] |
+| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] |
+| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} |
+| `customAnnotations`                            | Optional annotations for Deployment(s), Pod, Service, ServiceMonitor objects                                                               | {}                                                            |
+| `rollingUpdate.maxUnavailable` | Maximum number of unavailable replicas during rolling update | `1` |
+| `rollingUpdate.maxSurge` | Maximum number of pods created above desired number of pods | `25%` |
+| `podDisruptionBudget` | Optional PodDisruptionBudget | {} |
+| `podAnnotations` | Optional Pod only Annotations | {} |
+| `terminationGracePeriodSeconds` | Optional duration in seconds the pod needs to terminate gracefully. | 30 |
+| `hpa.enabled` | Enable Hpa autoscaler instead of proportional one | `false` |
+| `hpa.minReplicas` | Hpa minimum number of CoreDNS replicas | `1` |
+| `hpa.maxReplicas` | Hpa maximum number of CoreDNS replicas | `2` |
+| `hpa.metrics` | Metrics definitions used by Hpa to scale up and down | {} |
+| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` |
+| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` |
+| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` |
+| `autoscaler.min` | Min size of replicaCount | 0 |
+| `autoscaler.max` | Max size of replicaCount | 0 (aka no max) |
+| `autoscaler.includeUnschedulableNodes` | Should the replicas scale based on the total number or only schedulable nodes | `false` |
+| `autoscaler.preventSinglePointFailure` | If true does not allow single points of failure to form | `true` |
+| `autoscaler.customFlags` | A list of custom flags to pass into cluster-proportional-autoscaler | (no args) |
+| `autoscaler.image.repository` | The image repository to pull autoscaler from | registry.k8s.io/cpa/cluster-proportional-autoscaler |
+| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.8.5` |
+| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent |
+| `autoscaler.image.pullSecrets` | Specify container image pull secrets | `[]` |
+| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. | `""` |
+| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} |
+| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} |
+| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] |
+| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` |
+| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` |
+| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` |
+| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` |
+| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} |
+| `autoscaler.livenessProbe.enabled` | Enable/disable the Liveness probe | `true` |
+| `autoscaler.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `10` |
+| `autoscaler.livenessProbe.periodSeconds` | How often to perform the probe | `5` |
+| `autoscaler.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `autoscaler.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
+| `autoscaler.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `autoscaler.extraContainers` | Optional array of sidecar containers | [] |
+| `deployment.enabled` | Optionally disable the main deployment and its respective resources. | `true` |
+| `deployment.name` | Name of the deployment if `deployment.enabled` is true. Otherwise the name of an existing deployment for the autoscaler or HPA to target. | `""` |
+| `deployment.annotations` | Annotations to add to the main deployment | `{}` |
+
+See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install coredns \
+ coredns/coredns \
+ --set rbac.create=false
+```
+
+The above command disables automatic creation of RBAC rules.
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install coredns coredns/coredns -f values.yaml
+```
+
+> **Tip**: You can use the default [values.yaml](/charts/coredns/values.yaml)
+
+## Caveats
+
+The chart will automatically determine which protocols to listen on based on
+the protocols you define in your zones. This means that you could potentially
+use both "TCP" and "UDP" on a single port.
+Some cloud environments like "GCE" or "Azure container service" cannot
+create external loadbalancers with both "TCP" and "UDP" protocols. So
+when deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud
+environments, make sure you do not attempt to use both protocols at the same
+time.
+
+## Autoscaling
+
+By setting `autoscaler.enabled = true` a
+[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler)
+will be deployed. This will default to a coredns replica for every 256 cores, or
+16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica`
+and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more
+cores), `coresPerReplica` should dominate. If using small nodes,
+`nodesPerReplica` should dominate.
+
+This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for
+the autoscaler deployment.
+
+`replicaCount` is ignored if this is enabled.
+
+By setting `hpa.enabled = true` a [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
+is enabled for the CoreDNS deployment. This can scale the number of replicas based on metrics
+like CpuUtilization, MemoryUtilization or Custom ones.
+
+## Adopting existing CoreDNS resources
+
+If you do not want to delete the existing CoreDNS resources in your cluster, you can adopt the resources into a release as of Helm 3.2.0.
+
+You will also need to annotate and label your existing resources to allow Helm to assume control of them. See: https://github.com/helm/helm/pull/7649
+
+```
+annotations:
+ meta.helm.sh/release-name: your-release-name
+ meta.helm.sh/release-namespace: your-release-namespace
+labels:
+ app.kubernetes.io/managed-by: Helm
+```
+
+Once you have annotated and labeled all the resources this chart specifies, you may need to locally template the chart and compare against the existing manifests to ensure there are no changes/diffs. If
+you have been careful this should not diff and leave all the resources unmodified and now under management of helm.
+
+Some values to investigate to help adopt your existing manifests to the Helm release are:
+
+- k8sAppLabelOverride
+- service.name
+- customLabels
+
+In some cases, you will need to orphan delete your existing deployment since selector labels are immutable.
+
+```
+kubectl delete deployment coredns --cascade=orphan
+```
+
+This will delete the deployment and leave the replicaset to ensure no downtime in the cluster. You will need to manually delete the replicaset AFTER Helm has released a new deployment.
+
+Here is an example script to modify the annotations and labels of existing resources:
+
+WARNING: Substitute YOUR_HELM_RELEASE_NAME_HERE with the name of your helm release.
+
+```
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+for kind in config service serviceAccount; do
+ echo "setting annotations and labels on $kind/coredns"
+ kubectl -n kube-system annotate --overwrite $kind coredns meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE
+ kubectl -n kube-system annotate --overwrite $kind coredns meta.helm.sh/release-namespace=kube-system
+ kubectl -n kube-system label --overwrite $kind coredns app.kubernetes.io/managed-by=Helm
+done
+```
+
+NOTE: Sometimes, previous deployments of kube-dns that have been migrated to CoreDNS still use kube-dns for the service name as well.
+
+```
+echo "setting annotations and labels on service/kube-dns"
+kubectl -n kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE
+kubectl -n kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system
+kubectl -n kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm
+```
diff --git a/charts/coredns/templates/NOTES.txt b/charts/coredns/templates/NOTES.txt
new file mode 100644
index 0000000..3a1883b
--- /dev/null
+++ b/charts/coredns/templates/NOTES.txt
@@ -0,0 +1,30 @@
+{{- if .Values.isClusterService }}
+CoreDNS is now running in the cluster as a cluster-service.
+{{- else }}
+CoreDNS is now running in the cluster.
+It can be accessed using the below endpoint
+{{- if contains "NodePort" .Values.serviceType }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo "$NODE_IP:$NODE_PORT"
+{{- else if contains "LoadBalancer" .Values.serviceType }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo $SERVICE_IP
+{{- else if contains "ClusterIP" .Values.serviceType }}
+ "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local"
+ from within the cluster
+{{- end }}
+{{- end }}
+
+It can be tested with the following:
+
+1. Launch a Pod with DNS tools:
+
+kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
+
+2. Query the DNS server:
+
+/ # host kubernetes
diff --git a/charts/coredns/templates/_helpers.tpl b/charts/coredns/templates/_helpers.tpl
new file mode 100644
index 0000000..9c50032
--- /dev/null
+++ b/charts/coredns/templates/_helpers.tpl
@@ -0,0 +1,222 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "coredns.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "coredns.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "coredns.labels" -}}
+app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+app.kubernetes.io/instance: {{ .Release.Name | quote }}
+helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+{{- if .Values.isClusterService }}
+k8s-app: {{ template "coredns.k8sapplabel" . }}
+kubernetes.io/cluster-service: "true"
+kubernetes.io/name: "CoreDNS"
+{{- end }}
+app.kubernetes.io/name: {{ template "coredns.name" . }}
+{{- end -}}
+
+{{/*
+Common labels with autoscaler
+*/}}
+{{- define "coredns.labels.autoscaler" -}}
+app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+app.kubernetes.io/instance: {{ .Release.Name | quote }}
+helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+{{- if .Values.isClusterService }}
+k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler
+kubernetes.io/cluster-service: "true"
+kubernetes.io/name: "CoreDNS"
+{{- end }}
+app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler
+{{- end -}}
+
+{{/*
+Allow k8s-app label to be overridden
+*/}}
+{{- define "coredns.k8sapplabel" -}}
+{{- default .Chart.Name .Values.k8sAppLabelOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Generate the list of ports automatically from the server definitions
+*/}}
+{{- define "coredns.servicePorts" -}}
+ {{/* Set ports to be an empty dict */}}
+ {{- $ports := dict -}}
+ {{/* Iterate through each of the server blocks */}}
+ {{- range .Values.servers -}}
+ {{/* Capture port to avoid scoping awkwardness */}}
+ {{- $port := toString .port -}}
+
+ {{/* If none of the server blocks has mentioned this port yet take note of it */}}
+ {{- if not (hasKey $ports $port) -}}
+ {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}}
+ {{- end -}}
+ {{/* Retrieve the inner dict that holds the protocols for a given port */}}
+ {{- $innerdict := index $ports $port -}}
+
+ {{/*
+ Look at each of the zones and check which protocol they serve
+ At the moment the following are supported by CoreDNS:
+ UDP: dns://
+ TCP: tls://, grpc://
+ */}}
+ {{- range .zones -}}
+ {{- if has (default "" .scheme) (list "dns://") -}}
+ {{/* Optionally enable tcp for this service as well */}}
+ {{- if eq (default false .use_tcp) true }}
+ {{- $innerdict := set $innerdict "istcp" true -}}
+ {{- end }}
+ {{- $innerdict := set $innerdict "isudp" true -}}
+ {{- end -}}
+
+ {{- if has (default "" .scheme) (list "tls://" "grpc://") -}}
+ {{- $innerdict := set $innerdict "istcp" true -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}}
+ {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}}
+ {{- $innerdict := set $innerdict "isudp" true -}}
+ {{- $innerdict := set $innerdict "istcp" true -}}
+ {{- end -}}
+
+ {{- if .nodePort -}}
+ {{- $innerdict := set $innerdict "nodePort" .nodePort -}}
+ {{- end -}}
+
+ {{/* Write the dict back into the outer dict */}}
+ {{- $ports := set $ports $port $innerdict -}}
+ {{- end -}}
+
+ {{/* Write out the ports according to the info collected above */}}
+ {{- range $port, $innerdict := $ports -}}
+ {{- $portList := list -}}
+ {{- if index $innerdict "isudp" -}}
+ {{- $portList = append $portList (dict "port" ($port | int) "protocol" "UDP" "name" (printf "udp-%s" $port)) -}}
+ {{- end -}}
+ {{- if index $innerdict "istcp" -}}
+ {{- $portList = append $portList (dict "port" ($port | int) "protocol" "TCP" "name" (printf "tcp-%s" $port)) -}}
+ {{- end -}}
+
+ {{- range $portDict := $portList -}}
+ {{- if index $innerdict "nodePort" -}}
+ {{- $portDict := set $portDict "nodePort" (get $innerdict "nodePort" | int) -}}
+ {{- end -}}
+
+ {{- printf "- %s\n" (toJson $portDict) -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Generate the list of ports automatically from the server definitions
+*/}}
+{{- define "coredns.containerPorts" -}}
+ {{/* Set ports to be an empty dict */}}
+ {{- $ports := dict -}}
+ {{/* Iterate through each of the server blocks */}}
+ {{- range .Values.servers -}}
+ {{/* Capture port to avoid scoping awkwardness */}}
+ {{- $port := toString .port -}}
+
+ {{/* If none of the server blocks has mentioned this port yet take note of it */}}
+ {{- if not (hasKey $ports $port) -}}
+ {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}}
+ {{- end -}}
+ {{/* Retrieve the inner dict that holds the protocols for a given port */}}
+ {{- $innerdict := index $ports $port -}}
+
+ {{/*
+ Look at each of the zones and check which protocol they serve
+ At the moment the following are supported by CoreDNS:
+ UDP: dns://
+ TCP: tls://, grpc://
+ */}}
+ {{- range .zones -}}
+ {{- if has (default "" .scheme) (list "dns://") -}}
+ {{/* Optionally enable tcp for this service as well */}}
+ {{- if eq (default false .use_tcp) true }}
+ {{- $innerdict := set $innerdict "istcp" true -}}
+ {{- end }}
+ {{- $innerdict := set $innerdict "isudp" true -}}
+ {{- end -}}
+
+ {{- if has (default "" .scheme) (list "tls://" "grpc://") -}}
+ {{- $innerdict := set $innerdict "istcp" true -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}}
+ {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}}
+ {{- $innerdict := set $innerdict "isudp" true -}}
+ {{- $innerdict := set $innerdict "istcp" true -}}
+ {{- end -}}
+
+ {{- if .hostPort -}}
+ {{- $innerdict := set $innerdict "hostPort" .hostPort -}}
+ {{- end -}}
+
+ {{/* Write the dict back into the outer dict */}}
+ {{- $ports := set $ports $port $innerdict -}}
+
+ {{/* Fetch port from the configuration if the prometheus section exists */}}
+ {{- range .plugins -}}
+ {{- if eq .name "prometheus" -}}
+ {{- $prometheus_addr := toString .parameters -}}
+ {{- $prometheus_addr_list := regexSplit ":" $prometheus_addr -1 -}}
+ {{- $prometheus_port := index $prometheus_addr_list 1 -}}
+ {{- $ports := set $ports $prometheus_port (dict "istcp" true "isudp" false) -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{/* Write out the ports according to the info collected above */}}
+ {{- range $port, $innerdict := $ports -}}
+ {{- $portList := list -}}
+ {{- if index $innerdict "isudp" -}}
+ {{- $portList = append $portList (dict "containerPort" ($port | int) "protocol" "UDP" "name" (printf "udp-%s" $port)) -}}
+ {{- end -}}
+ {{- if index $innerdict "istcp" -}}
+ {{- $portList = append $portList (dict "containerPort" ($port | int) "protocol" "TCP" "name" (printf "tcp-%s" $port)) -}}
+ {{- end -}}
+
+ {{- range $portDict := $portList -}}
+ {{- if index $innerdict "hostPort" -}}
+ {{- $portDict := set $portDict "hostPort" (get $innerdict "hostPort" | int) -}}
+ {{- end -}}
+
+ {{- printf "- %s\n" (toJson $portDict) -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "coredns.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/coredns/templates/clusterrole-autoscaler.yaml b/charts/coredns/templates/clusterrole-autoscaler.yaml
new file mode 100644
index 0000000..9bf57d2
--- /dev/null
+++ b/charts/coredns/templates/clusterrole-autoscaler.yaml
@@ -0,0 +1,30 @@
+{{- if and .Values.autoscaler.enabled .Values.rbac.create }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "coredns.fullname" . }}-autoscaler
+ labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+rules:
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["list","watch"]
+ - apiGroups: [""]
+ resources: ["replicationcontrollers/scale"]
+ verbs: ["get", "update"]
+ - apiGroups: ["extensions", "apps"]
+ resources: ["deployments/scale", "replicasets/scale"]
+ verbs: ["get", "update"]
+# Remove the configmaps rule once below issue is fixed:
+# kubernetes-incubator/cluster-proportional-autoscaler#16
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "create"]
+{{- end }}
diff --git a/charts/coredns/templates/clusterrole.yaml b/charts/coredns/templates/clusterrole.yaml
new file mode 100644
index 0000000..c33762c
--- /dev/null
+++ b/charts/coredns/templates/clusterrole.yaml
@@ -0,0 +1,36 @@
+{{- if and .Values.deployment.enabled .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "coredns.fullname" . }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - services
+ - pods
+ - namespaces
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - list
+ - watch
+{{- if .Values.rbac.pspEnable }}
+- apiGroups:
+ - policy
+ - extensions
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ resourceNames:
+ - {{ template "coredns.fullname" . }}
+{{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/clusterrolebinding-autoscaler.yaml b/charts/coredns/templates/clusterrolebinding-autoscaler.yaml
new file mode 100644
index 0000000..ef32306
--- /dev/null
+++ b/charts/coredns/templates/clusterrolebinding-autoscaler.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.autoscaler.enabled .Values.rbac.create }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "coredns.fullname" . }}-autoscaler
+ labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "coredns.fullname" . }}-autoscaler
+subjects:
+- kind: ServiceAccount
+ name: {{ template "coredns.fullname" . }}-autoscaler
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/coredns/templates/clusterrolebinding.yaml b/charts/coredns/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000..36fa21c
--- /dev/null
+++ b/charts/coredns/templates/clusterrolebinding.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.deployment.enabled .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "coredns.fullname" . }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "coredns.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ template "coredns.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/coredns/templates/configmap-autoscaler.yaml b/charts/coredns/templates/configmap-autoscaler.yaml
new file mode 100644
index 0000000..b10eb59
--- /dev/null
+++ b/charts/coredns/templates/configmap-autoscaler.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.autoscaler.enabled }}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ template "coredns.fullname" . }}-autoscaler
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
+ {{- if .Values.customLabels }}
+ {{- toYaml .Values.customLabels | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.autoscaler.configmap.annotations .Values.customAnnotations }}
+ annotations:
+ {{- if .Values.customAnnotations }}
+ {{- toYaml .Values.customAnnotations | nindent 4 }}
+ {{- end }}
+ {{- if .Values.autoscaler.configmap.annotations -}}
+ {{ toYaml .Values.autoscaler.configmap.annotations | nindent 4 }}
+ {{- end }}
+ {{- end }}
+data:
+ # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
+ # If using small nodes, "nodesPerReplica" should dominate.
+ linear: |-
+ {
+ "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }},
+ "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }},
+ "preventSinglePointFailure": {{ .Values.autoscaler.preventSinglePointFailure }},
+ "min": {{ .Values.autoscaler.min | int }},
+ "max": {{ .Values.autoscaler.max | int }},
+ "includeUnschedulableNodes": {{ .Values.autoscaler.includeUnschedulableNodes }}
+ }
+{{- end }}
diff --git a/charts/coredns/templates/configmap.yaml b/charts/coredns/templates/configmap.yaml
new file mode 100644
index 0000000..fe2a262
--- /dev/null
+++ b/charts/coredns/templates/configmap.yaml
@@ -0,0 +1,34 @@
+{{- if .Values.deployment.enabled }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "coredns.fullname" . }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+data:
+ Corefile: |-
+ {{- range $name, $conf := .Values.extraConfig }}
+ {{ $name }}{{ if $conf.parameters }} {{ $conf.parameters }}{{ end }}
+ {{- end }}
+ {{ range .Values.servers }}
+ {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." $zone.zone }}{{ else }}.{{ end -}}
+ {{- if .port }}:{{ .port }} {{ end -}}
+ {
+ {{- range .plugins }}
+ {{ .name }}{{ if .parameters }} {{ .parameters }}{{ end }}{{ if .configBlock }} {
+{{ .configBlock | indent 12 }}
+ }{{ end }}
+ {{- end }}
+ }
+ {{ end }}
+ {{- range .Values.zoneFiles }}
+ {{ .filename }}: {{ toYaml .contents | indent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/deployment-autoscaler.yaml b/charts/coredns/templates/deployment-autoscaler.yaml
new file mode 100644
index 0000000..7de5c06
--- /dev/null
+++ b/charts/coredns/templates/deployment-autoscaler.yaml
@@ -0,0 +1,98 @@
+{{- if and (.Values.autoscaler.enabled) (not .Values.hpa.enabled) }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "coredns.fullname" . }}-autoscaler
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ {{- if .Values.isClusterService }}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler
+ template:
+ metadata:
+ labels:
+ {{- if .Values.isClusterService }}
+ {{- if not (hasKey .Values.customLabels "k8s-app")}}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler
+ {{- end }}
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ {{- if .Values.customLabels }}
+ {{ toYaml .Values.customLabels | nindent 8 }}
+ {{- end }}
+ annotations:
+ checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }}
+ {{- if .Values.isClusterService }}
+ scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
+ {{- end }}
+ {{- with .Values.autoscaler.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ serviceAccountName: {{ template "coredns.fullname" . }}-autoscaler
+ {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }}
+ {{- if $priorityClassName }}
+ priorityClassName: {{ $priorityClassName | quote }}
+ {{- end }}
+ {{- if .Values.autoscaler.affinity }}
+ affinity:
+{{ toYaml .Values.autoscaler.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.autoscaler.tolerations }}
+ tolerations:
+{{ toYaml .Values.autoscaler.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.autoscaler.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if not (empty .Values.autoscaler.image.pullSecrets) }}
+ imagePullSecrets:
+{{ toYaml .Values.autoscaler.image.pullSecrets | indent 8 }}
+ {{- end }}
+ containers:
+ - name: autoscaler
+ image: "{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }}"
+ imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }}
+ resources:
+{{ toYaml .Values.autoscaler.resources | indent 10 }}
+ {{- if .Values.autoscaler.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.autoscaler.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.autoscaler.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.autoscaler.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.autoscaler.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.autoscaler.livenessProbe.failureThreshold }}
+ {{- end }}
+ command:
+ - /cluster-proportional-autoscaler
+ - --namespace={{ .Release.Namespace }}
+ - --configmap={{ template "coredns.fullname" . }}-autoscaler
+ - --target=Deployment/{{ default (include "coredns.fullname" .) .Values.deployment.name }}
+ - --logtostderr=true
+ - --v=2
+ {{- if .Values.autoscaler.customFlags }}
+{{ toYaml .Values.autoscaler.customFlags | indent 10 }}
+ {{- end }}
+{{- if .Values.autoscaler.extraContainers }}
+{{ toYaml .Values.autoscaler.extraContainers | indent 6 }}
+{{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/deployment.yaml b/charts/coredns/templates/deployment.yaml
new file mode 100644
index 0000000..7eb90bc
--- /dev/null
+++ b/charts/coredns/templates/deployment.yaml
@@ -0,0 +1,161 @@
+{{- if .Values.deployment.enabled }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ default (include "coredns.fullname" .) .Values.deployment.name }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+ {{- if or .Values.deployment.annotations .Values.customAnnotations }}
+ annotations:
+ {{- if .Values.customAnnotations }}
+ {{- toYaml .Values.customAnnotations | nindent 4 }}
+ {{- end }}
+ {{- if .Values.deployment.annotations }}
+ {{- toYaml .Values.deployment.annotations | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ {{- if and (not .Values.autoscaler.enabled) (not .Values.hpa.enabled) }}
+ replicas: {{ .Values.replicaCount }}
+ {{- end }}
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: {{ .Values.rollingUpdate.maxUnavailable }}
+ maxSurge: {{ .Values.rollingUpdate.maxSurge }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ {{- if .Values.isClusterService }}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}
+ template:
+ metadata:
+ labels:
+ {{- if .Values.isClusterService }}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 8 }}
+{{- end }}
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ {{- if .Values.isClusterService }}
+ scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
+ {{- end }}
+{{- if .Values.podAnnotations }}
+{{ toYaml .Values.podAnnotations | indent 8 }}
+{{- end }}
+ spec:
+ {{- if .Values.podSecurityContext }}
+ securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
+ {{- end }}
+ {{- if .Values.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ {{- end }}
+ serviceAccountName: {{ template "coredns.serviceAccountName" . }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName | quote }}
+ {{- end }}
+ {{- if .Values.isClusterService }}
+ dnsPolicy: Default
+ {{- end }}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+{{ tpl (toYaml .Values.topologySpreadConstraints) $ | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if not (empty .Values.image.pullSecrets) }}
+ imagePullSecrets:
+{{ toYaml .Values.image.pullSecrets | indent 8 }}
+ {{- end }}
+ containers:
+ - name: "coredns"
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args: [ "-conf", "/etc/coredns/Corefile" ]
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/coredns
+{{- range .Values.extraSecrets }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ readOnly: true
+{{- end }}
+{{- if .Values.extraVolumeMounts }}
+{{- toYaml .Values.extraVolumeMounts | nindent 8}}
+{{- end }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ ports:
+{{ include "coredns.containerPorts" . | indent 8 }}
+ {{- if .Values.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.readinessProbe.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8181
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ {{- end }}
+{{- if .Values.securityContext }}
+ securityContext:
+{{- toYaml .Values.securityContext | nindent 10 }}
+{{- end }}
+{{- if .Values.extraContainers }}
+{{ toYaml .Values.extraContainers | indent 6 }}
+{{- end }}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: {{ template "coredns.fullname" . }}
+ items:
+ - key: Corefile
+ path: Corefile
+ {{ range .Values.zoneFiles }}
+ - key: {{ .filename }}
+ path: {{ .filename }}
+ {{ end }}
+{{- range .Values.extraSecrets }}
+ - name: {{ .name }}
+ secret:
+ secretName: {{ .name }}
+ defaultMode: {{ default 400 .defaultMode }}
+{{- end }}
+{{- if .Values.extraVolumes }}
+{{ toYaml .Values.extraVolumes | indent 8 }}
+{{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/hpa.yaml b/charts/coredns/templates/hpa.yaml
new file mode 100644
index 0000000..57a476b
--- /dev/null
+++ b/charts/coredns/templates/hpa.yaml
@@ -0,0 +1,32 @@
+{{- if and (.Values.hpa.enabled) (not .Values.autoscaler.enabled) }}
+---
+{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }}
+apiVersion: autoscaling/v2
+{{- else }}
+apiVersion: autoscaling/v2beta2
+{{- end }}
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ template "coredns.fullname" . }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ default (include "coredns.fullname" .) .Values.deployment.name }}
+ minReplicas: {{ .Values.hpa.minReplicas }}
+ maxReplicas: {{ .Values.hpa.maxReplicas }}
+ metrics:
+{{ toYaml .Values.hpa.metrics | indent 4 }}
+{{- if .Values.hpa.behavior }}
+ behavior:
+{{ toYaml .Values.hpa.behavior | indent 4 }}
+{{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/poddisruptionbudget.yaml b/charts/coredns/templates/poddisruptionbudget.yaml
new file mode 100644
index 0000000..272bc50
--- /dev/null
+++ b/charts/coredns/templates/poddisruptionbudget.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.deployment.enabled .Values.podDisruptionBudget -}}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "coredns.fullname" . }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ {{- if .Values.isClusterService }}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}
+{{ toYaml .Values.podDisruptionBudget | indent 2 }}
+{{- end }}
diff --git a/charts/coredns/templates/podsecuritypolicy.yaml b/charts/coredns/templates/podsecuritypolicy.yaml
new file mode 100644
index 0000000..6e02e00
--- /dev/null
+++ b/charts/coredns/templates/podsecuritypolicy.yaml
@@ -0,0 +1,47 @@
+{{- if and .Values.deployment.enabled .Values.rbac.pspEnable }}
+{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }}
+apiVersion: policy/v1beta1
+{{ else }}
+apiVersion: extensions/v1beta1
+{{ end -}}
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "coredns.fullname" . }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+spec:
+ privileged: false
+ # Required to prevent escalations to root.
+ allowPrivilegeEscalation: false
+ # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53
+ allowedCapabilities:
+ - NET_BIND_SERVICE
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Require the container to run without root privileges.
+ rule: 'RunAsAny'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+{{- end }}
diff --git a/charts/coredns/templates/service-metrics.yaml b/charts/coredns/templates/service-metrics.yaml
new file mode 100644
index 0000000..42dadbb
--- /dev/null
+++ b/charts/coredns/templates/service-metrics.yaml
@@ -0,0 +1,34 @@
+{{- if and .Values.deployment.enabled .Values.prometheus.service.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "coredns.fullname" . }}-metrics
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+ app.kubernetes.io/component: metrics
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+ {{- if or .Values.prometheus.service.annotations .Values.service.annotations .Values.customAnnotations }}
+ annotations:
+ {{- if .Values.prometheus.service.annotations }}
+ {{- toYaml .Values.prometheus.service.annotations | nindent 4 }}
+ {{- end }}
+ {{- if .Values.service.annotations }}
+ {{- toYaml .Values.service.annotations | nindent 4 }}
+ {{- end }}
+ {{- if .Values.customAnnotations }}
+ {{- toYaml .Values.customAnnotations | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ selector:
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ {{- if .Values.isClusterService }}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}
+ ports:
+ - name: metrics
+ port: 9153
+ targetPort: 9153
+{{- end }}
diff --git a/charts/coredns/templates/service.yaml b/charts/coredns/templates/service.yaml
new file mode 100644
index 0000000..7bd5c80
--- /dev/null
+++ b/charts/coredns/templates/service.yaml
@@ -0,0 +1,50 @@
+{{- if .Values.deployment.enabled }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ default (include "coredns.fullname" .) .Values.service.name }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+ {{- if or .Values.service.annotations .Values.customAnnotations }}
+ annotations:
+ {{- if .Values.service.annotations }}
+ {{- toYaml .Values.service.annotations | nindent 4 }}
+ {{- end }}
+ {{- if .Values.customAnnotations }}
+ {{- toYaml .Values.customAnnotations | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ selector:
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ {{- if .Values.isClusterService }}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}
+ {{- if .Values.service.clusterIP }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.service.clusterIPs }}
+ clusterIPs:
+ {{ toYaml .Values.service.clusterIPs | nindent 4 }}
+ {{- end }}
+ {{- if .Values.service.externalIPs }}
+ externalIPs:
+ {{- toYaml .Values.service.externalIPs | nindent 4 }}
+ {{- end }}
+ {{- if .Values.service.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
+ {{- end }}
+ {{- if .Values.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+{{ include "coredns.servicePorts" . | indent 2 -}}
+ type: {{ default "ClusterIP" .Values.serviceType }}
+ {{- if .Values.service.ipFamilyPolicy }}
+ ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
+ {{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/serviceaccount-autoscaler.yaml b/charts/coredns/templates/serviceaccount-autoscaler.yaml
new file mode 100644
index 0000000..8b0e9c7
--- /dev/null
+++ b/charts/coredns/templates/serviceaccount-autoscaler.yaml
@@ -0,0 +1,22 @@
+{{- if and .Values.autoscaler.enabled .Values.rbac.create }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "coredns.fullname" . }}-autoscaler
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+{{- if .Values.autoscaler.image.pullSecrets }}
+imagePullSecrets:
+{{- range .Values.autoscaler.image.pullSecrets }}
+ - name: {{ . }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/serviceaccount.yaml b/charts/coredns/templates/serviceaccount.yaml
new file mode 100644
index 0000000..7211676
--- /dev/null
+++ b/charts/coredns/templates/serviceaccount.yaml
@@ -0,0 +1,22 @@
+{{- if and .Values.deployment.enabled .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "coredns.serviceAccountName" . }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+ {{- if or .Values.serviceAccount.annotations .Values.customAnnotations }}
+ annotations:
+ {{- if .Values.customAnnotations }}
+ {{- toYaml .Values.customAnnotations | nindent 4 }}
+ {{- end }}
+ {{- if .Values.serviceAccount.annotations }}
+ {{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
+ {{- end }}
+ {{- end }}
+{{- if .Values.image.pullSecrets }}
+imagePullSecrets:
+{{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/coredns/templates/servicemonitor.yaml b/charts/coredns/templates/servicemonitor.yaml
new file mode 100644
index 0000000..b5fc642
--- /dev/null
+++ b/charts/coredns/templates/servicemonitor.yaml
@@ -0,0 +1,36 @@
+{{- if and .Values.deployment.enabled .Values.prometheus.monitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ template "coredns.fullname" . }}
+ {{- if .Values.prometheus.monitor.namespace }}
+ namespace: {{ .Values.prometheus.monitor.namespace }}
+ {{- end }}
+ labels: {{- include "coredns.labels" . | nindent 4 }}
+ {{- if .Values.prometheus.monitor.additionalLabels }}
+{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}
+ {{- end }}
+{{- with .Values.customAnnotations }}
+ annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+spec:
+ {{- if ne .Values.prometheus.monitor.namespace .Release.Namespace }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ {{- if .Values.isClusterService }}
+ k8s-app: {{ template "coredns.k8sapplabel" . }}
+ {{- end }}
+ app.kubernetes.io/name: {{ template "coredns.name" . }}
+ app.kubernetes.io/component: metrics
+ endpoints:
+ - port: metrics
+ {{- if .Values.prometheus.monitor.interval }}
+ interval: {{ .Values.prometheus.monitor.interval }}
+ {{- end }}
+{{- end }}
diff --git a/charts/coredns/values.yaml b/charts/coredns/values.yaml
new file mode 100644
index 0000000..5dc978a
--- /dev/null
+++ b/charts/coredns/values.yaml
@@ -0,0 +1,378 @@
+# Default values for coredns.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+image:
+ repository: coredns/coredns
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ pullSecrets: []
+ # pullSecrets:
+ # - name: myRegistryKeySecretName
+
+replicaCount: 1
+
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+rollingUpdate:
+ maxUnavailable: 1
+ maxSurge: 25%
+
+terminationGracePeriodSeconds: 30
+
+podAnnotations: {}
+# cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
+
+serviceType: "ClusterIP"
+
+prometheus:
+ service:
+ enabled: false
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9153"
+ monitor:
+ enabled: false
+ additionalLabels: {}
+ namespace: ""
+ interval: ""
+
+service:
+# clusterIP: ""
+# clusterIPs: []
+# loadBalancerIP: ""
+# externalIPs: []
+# externalTrafficPolicy: ""
+# ipFamilyPolicy: ""
+ # The name of the Service
+ # If not set, a name is generated using the fullname template
+ name: ""
+ annotations: {}
+
+serviceAccount:
+ create: false
+ # The name of the ServiceAccount to use
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+ annotations: {}
+
+rbac:
+ # If true, create & use RBAC resources
+ create: true
+ # If true, create and use PodSecurityPolicy
+ pspEnable: false
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name:
+
+# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app.
+isClusterService: true
+
+# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName is not set.
+priorityClassName: ""
+
+# Configure the pod level securityContext.
+podSecurityContext: {}
+
+# Configure SecurityContext for Pod.
+# Ensure that required linux capability to bind port number below 1024 is assigned (`CAP_NET_BIND_SERVICE`).
+securityContext:
+ capabilities:
+ add:
+ - NET_BIND_SERVICE
+
+# Default zone is what Kubernetes recommends:
+# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options
+servers:
+- zones:
+ - zone: .
+ port: 53
+ # If serviceType is nodePort you can specify nodePort here
+ # nodePort: 30053
+ # hostPort: 53
+ plugins:
+ - name: errors
+ # Serves a /health endpoint on :8080, required for livenessProbe
+ - name: health
+ configBlock: |-
+ lameduck 5s
+ # Serves a /ready endpoint on :8181, required for readinessProbe
+ - name: ready
+ # Required to query kubernetes API for data
+ - name: kubernetes
+ parameters: cluster.local in-addr.arpa ip6.arpa
+ configBlock: |-
+ pods insecure
+ fallthrough in-addr.arpa ip6.arpa
+ ttl 30
+ # Serves a /metrics endpoint on :9153, required for serviceMonitor
+ - name: prometheus
+ parameters: 0.0.0.0:9153
+ - name: forward
+ parameters: . /etc/resolv.conf
+ - name: cache
+ parameters: 30
+ - name: loop
+ - name: reload
+ - name: loadbalance
+
+# Complete example with all the options:
+# - zones: # the `zones` block can be left out entirely, defaults to "."
+# - zone: hello.world. # optional, defaults to "."
+# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
+# - zone: foo.bar.
+# scheme: dns://
+# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
+# # Note that this will not work if you are also exposing tls or grpc on the same server
+# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
+# plugins: # the plugins to use for this server block
+# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
+# parameters: foo bar # list of parameters after the plugin
+# configBlock: |- # if the plugin supports extra block style config, supply it here
+# hello world
+# foo bar
+
+# Extra configuration that is applied outside of the default zone block.
+# Example to include additional config files, which may come from extraVolumes:
+# extraConfig:
+# import:
+# parameters: /opt/coredns/*.conf
+extraConfig: {}
+
+# To use the livenessProbe, the health plugin needs to be enabled in CoreDNS' server config
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ successThreshold: 1
+# To use the readinessProbe, the ready plugin needs to be enabled in CoreDNS' server config
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ successThreshold: 1
+
+# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
+# for example:
+# affinity:
+# nodeAffinity:
+# requiredDuringSchedulingIgnoredDuringExecution:
+# nodeSelectorTerms:
+# - matchExpressions:
+# - key: foo.bar.com/role
+# operator: In
+# values:
+# - master
+affinity: {}
+
+# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#topologyspreadconstraint-v1-core
+# and supports Helm templating.
+# For example:
+# topologySpreadConstraints:
+# - labelSelector:
+# matchLabels:
+# app.kubernetes.io/name: '{{ template "coredns.name" . }}'
+# app.kubernetes.io/instance: '{{ .Release.Name }}'
+# topologyKey: topology.kubernetes.io/zone
+# maxSkew: 1
+# whenUnsatisfiable: ScheduleAnyway
+# - labelSelector:
+# matchLabels:
+# app.kubernetes.io/name: '{{ template "coredns.name" . }}'
+# app.kubernetes.io/instance: '{{ .Release.Name }}'
+# topologyKey: kubernetes.io/hostname
+# maxSkew: 1
+# whenUnsatisfiable: ScheduleAnyway
+topologySpreadConstraints: []
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+
+# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
+# for example:
+# tolerations:
+# - key: foo.bar.com/role
+# operator: Equal
+# value: master
+# effect: NoSchedule
+tolerations: []
+
+# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
+podDisruptionBudget: {}
+
+# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
+zoneFiles: []
+# - filename: example.db
+# domain: example.com
+# contents: |
+# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
+# example.com. IN NS b.iana-servers.net.
+# example.com. IN NS a.iana-servers.net.
+# example.com. IN A 192.168.99.102
+# *.example.com. IN A 192.168.99.102
+
+# optional array of sidecar containers
+extraContainers: []
+# - name: some-container-name
+# image: some-image:latest
+# imagePullPolicy: Always
+# optional array of extra volumes to create
+extraVolumes: []
+# - name: some-volume-name
+# emptyDir: {}
+# optional array of mount points for extraVolumes
+extraVolumeMounts: []
+# - name: some-volume-name
+# mountPath: /etc/wherever
+
+# optional array of secrets to mount inside coredns container
+# possible use case: needed for a secure connection with an etcd backend
+extraSecrets: []
+# - name: etcd-client-certs
+# mountPath: /etc/coredns/tls/etcd
+# defaultMode: 420
+# - name: some-fancy-secret
+# mountPath: /etc/wherever
+# defaultMode: 440
+
+# To support legacy deployments using CoreDNS with the "k8s-app: kube-dns" label selectors.
+# See https://github.com/coredns/helm/blob/master/charts/coredns/README.md#adopting-existing-coredns-resources
+# k8sAppLabelOverride: "kube-dns"
+
+# Custom labels to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled.
+customLabels: {}
+
+# Custom annotations to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled.
+customAnnotations: {}
+
+## Alternative configuration for HPA deployment if wanted
+## Create HorizontalPodAutoscaler object.
+##
+# hpa:
+# enabled: false
+# minReplicas: 1
+# maxReplicas: 10
+# metrics:
+# metrics:
+# - type: Resource
+# resource:
+# name: memory
+# target:
+# type: Utilization
+# averageUtilization: 60
+# - type: Resource
+# resource:
+# name: cpu
+# target:
+# type: Utilization
+# averageUtilization: 60
+
+hpa:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 2
+ metrics: []
+
+## Configure a cluster-proportional-autoscaler for coredns
+# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler
+autoscaler:
+ # Enabled the cluster-proportional-autoscaler
+ enabled: false
+
+ # Number of cores in the cluster per coredns replica
+ coresPerReplica: 256
+ # Number of nodes in the cluster per coredns replica
+ nodesPerReplica: 16
+ # Min size of replicaCount
+ min: 0
+ # Max size of replicaCount (default of 0 is no max)
+ max: 0
+ # Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler
+ includeUnschedulableNodes: false
+ # If true does not allow single points of failure to form
+ preventSinglePointFailure: true
+
+ # Annotations for the coredns proportional autoscaler pods
+ podAnnotations: {}
+
+ ## Optionally specify some extra flags to pass to cluster-proportional-autoscaler.
+ ## Useful for e.g. the nodelabels flag.
+ # customFlags:
+ # - --nodelabels=topology.kubernetes.io/zone=us-east-1a
+
+ image:
+ repository: registry.k8s.io/cpa/cluster-proportional-autoscaler
+ tag: "1.8.5"
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ pullSecrets: []
+ # pullSecrets:
+ # - name: myRegistryKeySecretName
+
+ # Optional priority class to be used for the autoscaler pods. priorityClassName is used if not set.
+ priorityClassName: ""
+
+ # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
+ affinity: {}
+
+ # Node labels for pod assignment
+ # Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ nodeSelector: {}
+
+ # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
+ tolerations: []
+
+ # resources for autoscaler pod
+ resources:
+ requests:
+ cpu: "20m"
+ memory: "10Mi"
+ limits:
+ cpu: "20m"
+ memory: "10Mi"
+
+ # Options for autoscaler configmap
+ configmap:
+ ## Annotations for the coredns-autoscaler configmap
+ # i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed
+ annotations: {}
+
+ # Enables the livenessProbe for cluster-proportional-autoscaler - this requires version 1.8.0+ of the autoscaler
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ timeoutSeconds: 5
+ failureThreshold: 3
+ successThreshold: 1
+
+ # optional array of sidecar containers
+ extraContainers: []
+ # - name: some-container-name
+ # image: some-image:latest
+ # imagePullPolicy: Always
+
+deployment:
+ enabled: true
+ name: ""
+ ## Annotations for the coredns deployment
+ annotations: {}
diff --git a/charts/csi-driver-smb/Chart.yaml b/charts/csi-driver-smb/Chart.yaml
new file mode 100644
index 0000000..5151a38
--- /dev/null
+++ b/charts/csi-driver-smb/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: v1.11.0
+description: SMB CSI Driver for Kubernetes
+name: csi-driver-smb
+version: v1.11.0
diff --git a/charts/csi-driver-smb/templates/NOTES.txt b/charts/csi-driver-smb/templates/NOTES.txt
new file mode 100644
index 0000000..9d8ca4f
--- /dev/null
+++ b/charts/csi-driver-smb/templates/NOTES.txt
@@ -0,0 +1,5 @@
+The CSI SMB Driver is getting deployed to your cluster.
+
+To check CSI SMB Driver pods status, please run:
+
+ kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/name={{ .Release.Name }}" --watch
diff --git a/charts/csi-driver-smb/templates/_helpers.tpl b/charts/csi-driver-smb/templates/_helpers.tpl
new file mode 100644
index 0000000..5394ab9
--- /dev/null
+++ b/charts/csi-driver-smb/templates/_helpers.tpl
@@ -0,0 +1,29 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/* Expand the name of the chart.*/}}
+{{- define "smb.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* labels for helm resources */}}
+{{- define "smb.labels" -}}
+labels:
+ app.kubernetes.io/instance: "{{ .Release.Name }}"
+ app.kubernetes.io/managed-by: "{{ .Release.Service }}"
+ app.kubernetes.io/name: "{{ template "smb.name" . }}"
+ app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ {{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 2 -}}
+ {{- end }}
+{{- end -}}
+
+{{/* pull secrets for containers */}}
+{{- define "smb.pullSecrets" -}}
+{{- if .Values.imagePullSecrets }}
+imagePullSecrets:
+{{- range .Values.imagePullSecrets }}
+ - name: {{ . }}
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/charts/csi-driver-smb/templates/csi-smb-controller.yaml b/charts/csi-driver-smb/templates/csi-smb-controller.yaml
new file mode 100644
index 0000000..0ee52ca
--- /dev/null
+++ b/charts/csi-driver-smb/templates/csi-smb-controller.yaml
@@ -0,0 +1,137 @@
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: {{ .Values.controller.name }}
+ namespace: {{ .Release.Namespace }}
+{{ include "smb.labels" . | indent 2 }}
+spec:
+ replicas: {{ .Values.controller.replicas }}
+ selector:
+ matchLabels:
+ app: {{ .Values.controller.name }}
+ template:
+ metadata:
+{{ include "smb.labels" . | indent 6 }}
+ app: {{ .Values.controller.name }}
+ {{- if .Values.podLabels }}
+{{- toYaml .Values.podLabels | nindent 8 }}
+ {{- end }}
+{{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+{{- end }}
+ spec:
+{{- with .Values.controller.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ hostNetwork: true
+ dnsPolicy: {{ .Values.controller.dnsPolicy }}
+ serviceAccountName: {{ .Values.serviceAccount.controller }}
+ nodeSelector:
+{{- with .Values.controller.nodeSelector }}
+{{ toYaml . | indent 8 }}
+{{- end }}
+ kubernetes.io/os: linux
+ {{- if .Values.controller.runOnMaster}}
+ node-role.kubernetes.io/master: ""
+ {{- end}}
+ {{- if .Values.controller.runOnControlPlane}}
+ node-role.kubernetes.io/control-plane: ""
+ {{- end}}
+ priorityClassName: {{ .Values.priorityClassName | quote }}
+ {{- if .Values.securityContext }}
+ securityContext: {{- toYaml .Values.securityContext | nindent 8 }}
+ {{- end }}
+{{- with .Values.controller.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ {{- include "smb.pullSecrets" . | indent 6 }}
+ containers:
+ - name: csi-provisioner
+{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
+{{- else }}
+ image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
+{{- end }}
+ args:
+ - "-v=2"
+ - "--csi-address=$(ADDRESS)"
+ - "--leader-election"
+ - "--leader-election-namespace={{ .Release.Namespace }}"
+ - "--extra-create-metadata=true"
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
+ securityContext:
+ readOnlyRootFilesystem: true
+ - name: liveness-probe
+{{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- else }}
+ image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- end }}
+ args:
+ - --csi-address=/csi/csi.sock
+ - --probe-timeout=3s
+ - --health-port={{ .Values.controller.livenessProbe.healthPort }}
+ - --v=2
+ imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
+ securityContext:
+ readOnlyRootFilesystem: true
+ - name: smb
+{{- if hasPrefix "/" .Values.image.smb.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.smb.repository }}:{{ .Values.image.smb.tag }}"
+{{- else }}
+ image: "{{ .Values.image.smb.repository }}:{{ .Values.image.smb.tag }}"
+{{- end }}
+ imagePullPolicy: {{ .Values.image.smb.pullPolicy }}
+ args:
+ - "--v={{ .Values.controller.logLevel }}"
+ - "--endpoint=$(CSI_ENDPOINT)"
+ - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}"
+ - "--drivername={{ .Values.driver.name }}"
+ - "--working-mount-dir={{ .Values.controller.workingMountDir }}"
+ ports:
+ - containerPort: {{ .Values.controller.livenessProbe.healthPort }}
+ name: healthz
+ protocol: TCP
+ - containerPort: {{ .Values.controller.metricsPort }}
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: CSI_ENDPOINT
+ value: unix:///csi/csi.sock
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ - mountPath: {{ .Values.controller.workingMountDir }}
+ name: tmp-dir
+ resources: {{- toYaml .Values.controller.resources.smb | nindent 12 }}
+ volumes:
+ - name: socket-dir
+ emptyDir: {}
+ - name: tmp-dir
+ emptyDir: {}
diff --git a/charts/csi-driver-smb/templates/csi-smb-driver.yaml b/charts/csi-driver-smb/templates/csi-smb-driver.yaml
new file mode 100644
index 0000000..1609437
--- /dev/null
+++ b/charts/csi-driver-smb/templates/csi-smb-driver.yaml
@@ -0,0 +1,8 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+ name: {{ .Values.driver.name }}
+spec:
+ attachRequired: false
+ podInfoOnMount: true
diff --git a/charts/csi-driver-smb/templates/csi-smb-node-windows.yaml b/charts/csi-driver-smb/templates/csi-smb-node-windows.yaml
new file mode 100644
index 0000000..b033b15
--- /dev/null
+++ b/charts/csi-driver-smb/templates/csi-smb-node-windows.yaml
@@ -0,0 +1,171 @@
+{{- if .Values.windows.enabled}}
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: {{ .Values.windows.dsName }}
+ namespace: {{ .Release.Namespace }}
+{{ include "smb.labels" . | indent 2 }}
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: {{ .Values.node.maxUnavailable }}
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: {{ .Values.windows.dsName }}
+ template:
+ metadata:
+{{ include "smb.labels" . | indent 6 }}
+ app: {{ .Values.windows.dsName }}
+ spec:
+{{- with .Values.windows.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ nodeSelector:
+ kubernetes.io/os: windows
+{{- with .Values.node.nodeSelector }}
+{{ toYaml . | indent 8 }}
+{{- end }}
+{{- with .Values.node.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ priorityClassName: {{ .Values.priorityClassName | quote }}
+ {{- if .Values.securityContext }}
+ securityContext: {{- toYaml .Values.securityContext | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ .Values.serviceAccount.node }}
+ {{- include "smb.pullSecrets" . | indent 6 }}
+ containers:
+ - name: liveness-probe
+ volumeMounts:
+ - mountPath: C:\csi
+ name: plugin-dir
+{{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- else }}
+ image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- end }}
+ args:
+ - --csi-address=$(CSI_ENDPOINT)
+ - --probe-timeout=3s
+ - --health-port={{ .Values.node.livenessProbe.healthPort }}
+ - --v=2
+ env:
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
+ resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }}
+ - name: node-driver-registrar
+{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- else }}
+ image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- end }}
+ args:
+ - --v=2
+ - --csi-address=$(CSI_ENDPOINT)
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ livenessProbe:
+ exec:
+ command:
+ - /csi-node-driver-registrar.exe
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --mode=kubelet-registration-probe
+ initialDelaySeconds: 60
+ timeoutSeconds: 30
+ env:
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: {{ .Values.windows.kubelet | replace "\\" "\\\\" }}\\plugins\\{{ .Values.driver.name }}\\csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
+ volumeMounts:
+ - name: plugin-dir
+ mountPath: C:\csi
+ - name: registration-dir
+ mountPath: C:\registration
+ resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }}
+ - name: smb
+{{- if hasPrefix "/" .Values.image.smb.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.smb.repository }}:{{ .Values.image.smb.tag }}"
+{{- else }}
+ image: "{{ .Values.image.smb.repository }}:{{ .Values.image.smb.tag }}"
+{{- end }}
+ imagePullPolicy: {{ .Values.image.smb.pullPolicy }}
+ args:
+ - "--v={{ .Values.node.logLevel }}"
+ - "--drivername={{ .Values.driver.name }}"
+ - --endpoint=$(CSI_ENDPOINT)
+ - --nodeid=$(KUBE_NODE_NAME)
+ - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}"
+ - "--remove-smb-mapping-during-unmount={{ .Values.windows.removeSMBMappingDuringUnmount }}"
+ ports:
+ - containerPort: {{ .Values.node.livenessProbe.healthPort }}
+ name: healthz
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ volumeMounts:
+ - name: kubelet-dir
+ mountPath: {{ .Values.windows.kubelet }}\
+ - name: plugin-dir
+ mountPath: C:\csi
+ - name: csi-proxy-fs-pipe-v1
+ mountPath: \\.\pipe\csi-proxy-filesystem-v1
+ - name: csi-proxy-smb-pipe-v1
+ mountPath: \\.\pipe\csi-proxy-smb-v1
+ # these paths are still included for compatibility, they're used
+ # only if the node still has the beta version of the CSI proxy
+ - name: csi-proxy-fs-pipe-v1beta1
+ mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1
+ - name: csi-proxy-smb-pipe-v1beta1
+ mountPath: \\.\pipe\csi-proxy-smb-v1beta1
+ resources: {{- toYaml .Values.windows.resources.smb | nindent 12 }}
+ volumes:
+ - name: csi-proxy-fs-pipe-v1
+ hostPath:
+ path: \\.\pipe\csi-proxy-filesystem-v1
+ - name: csi-proxy-smb-pipe-v1
+ hostPath:
+ path: \\.\pipe\csi-proxy-smb-v1
+ # these paths are still included for compatibility, they're used
+ # only if the node still has the beta version of the CSI proxy
+ - name: csi-proxy-fs-pipe-v1beta1
+ hostPath:
+ path: \\.\pipe\csi-proxy-filesystem-v1beta1
+ - name: csi-proxy-smb-pipe-v1beta1
+ hostPath:
+ path: \\.\pipe\csi-proxy-smb-v1beta1
+ - name: registration-dir
+ hostPath:
+ path: {{ .Values.windows.kubelet }}\plugins_registry\
+ type: Directory
+ - name: kubelet-dir
+ hostPath:
+ path: {{ .Values.windows.kubelet }}\
+ type: Directory
+ - name: plugin-dir
+ hostPath:
+ path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\
+ type: DirectoryOrCreate
+{{- end -}}
diff --git a/charts/csi-driver-smb/templates/csi-smb-node.yaml b/charts/csi-driver-smb/templates/csi-smb-node.yaml
new file mode 100644
index 0000000..13e8ef7
--- /dev/null
+++ b/charts/csi-driver-smb/templates/csi-smb-node.yaml
@@ -0,0 +1,154 @@
+{{- if .Values.linux.enabled}}
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: {{ .Values.linux.dsName }}
+ namespace: {{ .Release.Namespace }}
+{{ include "smb.labels" . | indent 2 }}
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: {{ .Values.node.maxUnavailable }}
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: {{ .Values.linux.dsName }}
+ template:
+ metadata:
+{{ include "smb.labels" . | indent 6 }}
+ app: {{ .Values.linux.dsName }}
+ {{- if .Values.podLabels }}
+{{- toYaml .Values.podLabels | nindent 8 }}
+ {{- end }}
+{{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+{{- end }}
+ spec:
+{{- with .Values.node.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ hostNetwork: true
+ dnsPolicy: {{ .Values.linux.dnsPolicy }}
+ serviceAccountName: {{ .Values.serviceAccount.node }}
+ nodeSelector:
+ kubernetes.io/os: linux
+{{- with .Values.node.nodeSelector }}
+{{ toYaml . | indent 8 }}
+{{- end }}
+ priorityClassName: {{ .Values.priorityClassName | quote }}
+ {{- if .Values.securityContext }}
+ securityContext: {{- toYaml .Values.securityContext | nindent 8 }}
+ {{- end }}
+{{- with .Values.linux.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ {{- include "smb.pullSecrets" . | indent 6 }}
+ containers:
+ - name: liveness-probe
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+{{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- else }}
+ image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- end }}
+ args:
+ - --csi-address=/csi/csi.sock
+ - --probe-timeout=3s
+ - --health-port={{ .Values.node.livenessProbe.healthPort }}
+ - --v=2
+ imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
+ resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }}
+ securityContext:
+ readOnlyRootFilesystem: true
+ - name: node-driver-registrar
+{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- else }}
+ image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- end }}
+ args:
+ - --csi-address=$(ADDRESS)
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --v=2
+ livenessProbe:
+ exec:
+ command:
+ - /csi-node-driver-registrar
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --mode=kubelet-registration-probe
+ initialDelaySeconds: 30
+ timeoutSeconds: 15
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock
+ imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ - name: registration-dir
+ mountPath: /registration
+ resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }}
+ - name: smb
+{{- if hasPrefix "/" .Values.image.smb.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.smb.repository }}:{{ .Values.image.smb.tag }}"
+{{- else }}
+ image: "{{ .Values.image.smb.repository }}:{{ .Values.image.smb.tag }}"
+{{- end }}
+ imagePullPolicy: {{ .Values.image.smb.pullPolicy }}
+ args:
+ - "--v={{ .Values.node.logLevel }}"
+ - "--drivername={{ .Values.driver.name }}"
+ - "--endpoint=$(CSI_ENDPOINT)"
+ - "--nodeid=$(KUBE_NODE_NAME)"
+ - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}"
+ ports:
+ - containerPort: {{ .Values.node.livenessProbe.healthPort }}
+ name: healthz
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: CSI_ENDPOINT
+ value: unix:///csi/csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ securityContext:
+ readOnlyRootFilesystem: true
+ privileged: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ - mountPath: {{ .Values.linux.kubelet }}/
+ mountPropagation: Bidirectional
+ name: mountpoint-dir
+ resources: {{- toYaml .Values.linux.resources.smb | nindent 12 }}
+ volumes:
+ - hostPath:
+ path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}
+ type: DirectoryOrCreate
+ name: socket-dir
+ - hostPath:
+ path: {{ .Values.linux.kubelet }}/
+ type: DirectoryOrCreate
+ name: mountpoint-dir
+ - hostPath:
+ path: {{ .Values.linux.kubelet }}/plugins_registry/
+ type: DirectoryOrCreate
+ name: registration-dir
+{{- end -}}
diff --git a/charts/csi-driver-smb/templates/rbac-csi-smb.yaml b/charts/csi-driver-smb/templates/rbac-csi-smb.yaml
new file mode 100644
index 0000000..03561d1
--- /dev/null
+++ b/charts/csi-driver-smb/templates/rbac-csi-smb.yaml
@@ -0,0 +1,65 @@
+{{- if .Values.serviceAccount.create -}}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+{{ include "smb.labels" . | indent 2 }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.serviceAccount.node }}
+ namespace: {{ .Release.Namespace }}
+{{ include "smb.labels" . | indent 2 }}
+{{ end }}
+
+{{- if .Values.rbac.create -}}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Values.rbac.name }}-external-provisioner-role
+{{ include "smb.labels" . | indent 2 }}
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["csinodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get"]
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-csi-provisioner-binding
+{{ include "smb.labels" . | indent 2 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ .Values.rbac.name }}-external-provisioner-role
+ apiGroup: rbac.authorization.k8s.io
+{{ end }}
diff --git a/charts/csi-driver-smb/values.yaml b/charts/csi-driver-smb/values.yaml
new file mode 100644
index 0000000..19b722d
--- /dev/null
+++ b/charts/csi-driver-smb/values.yaml
@@ -0,0 +1,153 @@
+image:
+ baseRepo: registry.k8s.io/sig-storage
+ smb:
+ repository: registry.k8s.io/sig-storage/smbplugin
+ tag: v1.11.0
+ pullPolicy: IfNotPresent
+ csiProvisioner:
+ repository: registry.k8s.io/sig-storage/csi-provisioner
+ tag: v3.5.0
+ pullPolicy: IfNotPresent
+ livenessProbe:
+ repository: registry.k8s.io/sig-storage/livenessprobe
+ tag: v2.10.0
+ pullPolicy: IfNotPresent
+ nodeDriverRegistrar:
+ repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
+ tag: v2.8.0
+ pullPolicy: IfNotPresent
+
+serviceAccount:
+ create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
+ controller: csi-smb-controller-sa
+ node: csi-smb-node-sa
+
+rbac:
+ create: true
+ name: smb
+
+driver:
+ name: smb.csi.k8s.io
+
+feature:
+ enableGetVolumeStats: true
+
+controller:
+ name: csi-smb-controller
+ replicas: 1
+ dnsPolicy: ClusterFirstWithHostNet # available values: Default, ClusterFirstWithHostNet, ClusterFirst
+ metricsPort: 29644
+ livenessProbe:
+ healthPort: 29642
+ runOnMaster: false
+ runOnControlPlane: false
+ logLevel: 5
+ workingMountDir: "/tmp"
+ resources:
+ csiProvisioner:
+ limits:
+ memory: 300Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ livenessProbe:
+ limits:
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ smb:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ affinity: {}
+ nodeSelector: {}
+ tolerations:
+ - key: "node-role.kubernetes.io/master"
+ operator: "Exists"
+ effect: "NoSchedule"
+ - key: "node-role.kubernetes.io/controlplane"
+ operator: "Exists"
+ effect: "NoSchedule"
+ - key: "node-role.kubernetes.io/control-plane"
+ operator: "Exists"
+ effect: "NoSchedule"
+
+node:
+ maxUnavailable: 1
+ logLevel: 5
+ livenessProbe:
+ healthPort: 29643
+ affinity: {}
+ nodeSelector: {}
+
+linux:
+ enabled: true
+ dsName: csi-smb-node # daemonset name
+ dnsPolicy: ClusterFirstWithHostNet # available values: Default, ClusterFirstWithHostNet, ClusterFirst
+ kubelet: /var/lib/kubelet
+ tolerations:
+ - operator: "Exists"
+ resources:
+ livenessProbe:
+ limits:
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ nodeDriverRegistrar:
+ limits:
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ smb:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+
+windows:
+ enabled: false
+ dsName: csi-smb-node-win # daemonset name
+ kubelet: 'C:\var\lib\kubelet'
+ removeSMBMappingDuringUnmount: true
+ tolerations:
+ - key: "node.kubernetes.io/os"
+ operator: "Exists"
+ effect: "NoSchedule"
+ resources:
+ livenessProbe:
+ limits:
+ memory: 150Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ nodeDriverRegistrar:
+ limits:
+ memory: 150Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ smb:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+
+customLabels: {}
+## Collection of annotations to add to all the pods
+podAnnotations: {}
+## Collection of labels to add to all the pods
+podLabels: {}
+## Leverage a PriorityClass to ensure your pods survive resource shortages
+## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+priorityClassName: system-cluster-critical
+## Security context give the opportunity to run container as nonroot by setting a securityContext
+## by example :
+## securityContext: { runAsUser: 1001 }
+securityContext: { seccompProfile: {type: RuntimeDefault} }
diff --git a/charts/dns-api/.helmignore b/charts/dns-api/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/dns-api/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/dns-api/Chart.yaml b/charts/dns-api/Chart.yaml
new file mode 100644
index 0000000..1c00bce
--- /dev/null
+++ b/charts/dns-api/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: dns-api
+description: A Helm chart for dns-api
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/dns-api/templates/install.yaml b/charts/dns-api/templates/install.yaml
new file mode 100644
index 0000000..33d9a2d
--- /dev/null
+++ b/charts/dns-api/templates/install.yaml
@@ -0,0 +1,72 @@
+# TODO(gio): we'll need to separate intra-dns service and one accessible from k8s cluster
+apiVersion: v1
+kind: Service
+metadata:
+ name: dns-api
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.service.annotations }}
+ annotations:
+ {{- toYaml .Values.service.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ selector:
+ app: dns-api
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: dns-api
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: dns-api
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: dns-api
+ spec:
+ containers:
+ - name: dns-api
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - dns-api
+ - --port=8080
+ - --root-dir={{ .Values.volume.mountPath }}
+ - --config={{ .Values.config }}
+ - --db={{ .Values.db }}
+ - --zone={{ .Values.zone }}
+ - --public-ip={{ .Values.publicIP }}
+ - --private-ip={{ .Values.privateIP }}
+ - --nameserver-ip={{ .Values.nameserverIP }}
+ volumeMounts:
+ - name: data
+ mountPath: {{ .Values.volume.mountPath }}
+ resources:
+ requests:
+ memory: "10Mi"
+ cpu: "10m"
+ limits:
+ memory: "20Mi"
+ cpu: "100m"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ .Values.volume.claimName }}
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
diff --git a/charts/dns-api/values.yaml b/charts/dns-api/values.yaml
new file mode 100644
index 0000000..6c0f3d7
--- /dev/null
+++ b/charts/dns-api/values.yaml
@@ -0,0 +1,16 @@
+image:
+ repository: giolekva/dns-api
+ tag: latest
+ pullPolicy: Always
+config: "coredns.conf"
+db: "records.db"
+zone: "example.com"
+publicIP: "1.2.3.4,5.6.7.8"
+privateIP: "10.0.1.0"
+nameserverIP: "4.3.2.1,8.7.6.5"
+volume:
+ claimName: "data"
+ mountPath: "/pcloud"
+service:
+ type: "ClusterIP"
+ annotations: {}
diff --git a/charts/dodo-app/.helmignore b/charts/dodo-app/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/dodo-app/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/dodo-app/Chart.yaml b/charts/dodo-app/Chart.yaml
new file mode 100644
index 0000000..b91b7b9
--- /dev/null
+++ b/charts/dodo-app/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: dodo-app
+description: A Helm chart for updating Dodo apps
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/dodo-app/templates/install.yaml b/charts/dodo-app/templates/install.yaml
new file mode 100644
index 0000000..58c920c
--- /dev/null
+++ b/charts/dodo-app/templates/install.yaml
@@ -0,0 +1,149 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Values.clusterRoleName }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - create
+- apiGroups:
+ - "batch"
+ resources:
+ - jobs
+ verbs:
+ - create
+- apiGroups:
+ - "helm.toolkit.fluxcd.io"
+ resources:
+ - helmreleases
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Values.clusterRoleName }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Values.clusterRoleName }}
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ssh-key
+type: Opaque
+data:
+ private: {{ .Values.sshPrivateKey }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: api
+spec:
+ type: ClusterIP
+ selector:
+ app: dodo-app
+ ports:
+ - name: http
+ port: 80
+ targetPort: api
+ protocol: TCP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: web
+spec:
+ type: ClusterIP
+ selector:
+ app: dodo-app
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: dodo-app
+spec:
+ selector:
+ matchLabels:
+ app: dodo-app
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: dodo-app
+ spec:
+ volumes:
+ - name: ssh-key
+ secret:
+ secretName: ssh-key
+ - name: env-config
+ secret:
+ secretName: env-config
+ - name: db
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistentVolumeClaimName }}
+ initContainers:
+ - name: volume-permissions
+ image: busybox:latest
+ command: ["sh", "-c", "chmod -Rv 777 /dodo-app/db"]
+ volumeMounts:
+ - name: db
+ mountPath: /dodo-app/db
+ containers:
+ - name: dodo-app
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.port }}
+ protocol: TCP
+ - name: api
+ containerPort: {{ .Values.apiPort }}
+ protocol: TCP
+ command:
+ - pcloud-installer
+ - dodo-app
+ - --repo-addr={{ .Values.repoAddr }}
+ - --ssh-key=/pcloud/ssh-key/private
+ - --port={{ .Values.port }}
+ - --api-port={{ .Values.apiPort }}
+ - --self={{ .Values.self }}
+ - --repo-public-addr={{ .Values.repoPublicAddr }}
+ - --namespace={{ .Values.namespace }} # TODO(gio): maybe use .Release.Namespace ?
+ - --env-app-manager-addr={{ .Values.envAppManagerAddr }}
+ - --env-config=/pcloud/env-config/config.json
+ - --git-repo-public-key={{ .Values.gitRepoPublicKey }}
+ - --db=/dodo-app/db/apps.db
+ - --networks={{ .Values.allowedNetworks }}
+ - --external={{ .Values.external }}
+ - --fetch-users-addr={{ .Values.fetchUsersAddr }}
+ volumeMounts:
+ - name: ssh-key
+ readOnly: true
+ mountPath: /pcloud/ssh-key
+ - name: env-config
+ readOnly: true
+ mountPath: /pcloud/env-config
+ - name: db
+ mountPath: /dodo-app/db
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: env-config
+type: Opaque
+data:
+ config.json: {{ .Values.envConfig }}
diff --git a/charts/dodo-app/values.yaml b/charts/dodo-app/values.yaml
new file mode 100644
index 0000000..b1d57c4
--- /dev/null
+++ b/charts/dodo-app/values.yaml
@@ -0,0 +1,19 @@
+image:
+ repository: giolekva/pcloud-installer
+ tag: latest
+ pullPolicy: Always
+port: 8080
+apiPort: 8081
+clusterRoleName: dodo-app-creator
+repoAddr: 192.168.0.11
+sshPrivateKey: key
+self: ""
+repoPublicAddr: ""
+namespace: ""
+envAppManagerAddr: ""
+envConfig: ""
+gitRepoPublicKey: ""
+persistentVolumeClaimName: ""
+allowedNetworks: ""
+external: false
+fetchUsersAddr: ""
diff --git a/charts/env-manager/.helmignore b/charts/env-manager/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/env-manager/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/env-manager/Chart.yaml b/charts/env-manager/Chart.yaml
new file mode 100644
index 0000000..8920923
--- /dev/null
+++ b/charts/env-manager/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: env-manager
+description: A Helm chart for PCloud env manager
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/env-manager/templates/install.yaml b/charts/env-manager/templates/install.yaml
new file mode 100644
index 0000000..dd14caf
--- /dev/null
+++ b/charts/env-manager/templates/install.yaml
@@ -0,0 +1,94 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Values.clusterRoleName }}
+rules: # TODO(gio): restrict to ns create and dnszone get
+- apiGroups:
+ - "*"
+ resources:
+ - "*"
+ verbs:
+ - "*"
+# - apiGroups:
+# - ""
+# resources:
+# - namespaces
+# verbs:
+# - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Values.clusterRoleName }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Values.clusterRoleName }}
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ssh-key
+type: Opaque
+data:
+ private: {{ .Values.sshPrivateKey }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: env-manager
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ metallb.universe.tf/address-pool: local
+spec:
+ type: LoadBalancer
+ selector:
+ app: env-manager
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: env-manager
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: env-manager
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: env-manager
+ spec:
+ volumes:
+ - name: ssh-key
+ secret:
+ secretName: ssh-key
+ containers:
+ - name: env-manager
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - pcloud-installer
+ - envmanager
+ - --repo-addr={{ .Values.repoIP }}:{{ .Values.repoPort }}
+ - --repo-name={{ .Values.repoName }}
+ - --ssh-key=/pcloud/ssh-key/private
+ - --port=8080
+ volumeMounts:
+ - name: ssh-key
+ readOnly: true
+ mountPath: /pcloud/ssh-key
diff --git a/charts/env-manager/values.yaml b/charts/env-manager/values.yaml
new file mode 100644
index 0000000..45b9874
--- /dev/null
+++ b/charts/env-manager/values.yaml
@@ -0,0 +1,9 @@
+image:
+ repository: giolekva/pcloud-installer
+ tag: latest
+ pullPolicy: Always
+repoIP: 192.168.0.11
+repoPort: 22
+repoName: pcloud
+sshPrivateKey: key
+clusterRoleName: pcloud-env-manager
diff --git a/charts/flux-bootstrap/.helmignore b/charts/flux-bootstrap/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/flux-bootstrap/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/flux-bootstrap/Chart.yaml b/charts/flux-bootstrap/Chart.yaml
new file mode 100644
index 0000000..26b3ff3
--- /dev/null
+++ b/charts/flux-bootstrap/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: flux-bootstrap
+description: A Helm chart to bootstrap Fluxcd on PCloud
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/flux-bootstrap/templates/access-keys.yaml b/charts/flux-bootstrap/templates/access-keys.yaml
new file mode 100644
index 0000000..3a1a7f5
--- /dev/null
+++ b/charts/flux-bootstrap/templates/access-keys.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: access-keys
+ namespace: {{ .Release.Namespace }}
+data:
+ private.key: {{ toYaml .Values.privateKey | nindent 4 }}
diff --git a/charts/flux-bootstrap/templates/fluxcd.yaml b/charts/flux-bootstrap/templates/fluxcd.yaml
new file mode 100644
index 0000000..85c3a1a
--- /dev/null
+++ b/charts/flux-bootstrap/templates/fluxcd.yaml
@@ -0,0 +1,54 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: fluxcd-bootstrap
+ namespace: {{ .Release.Namespace }}
+spec:
+ template:
+ spec:
+ volumes:
+ - name: access-keys
+ configMap:
+ name: access-keys
+ - name: known-hosts
+ configMap:
+ name: known-hosts
+ containers:
+ - name: fluxcd
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ volumeMounts:
+ - name: access-keys
+ mountPath: /access-keys
+ - name: known-hosts
+ mountPath: /.ssh
+ env:
+ - name: SSH_KNOWN_HOSTS
+ value: /.ssh/known_hosts
+ command:
+ - flux
+ - bootstrap
+ - git
+ - --namespace={{ .Values.installationNamespace }}
+ - --url={{ .Values.repositoryAddress }}
+ - --branch={{ .Values.repository.branch }}
+ - --path={{ .Values.repository.path }}
+ - --private-key-file=/access-keys/private.key
+ - --ssh-key-algorithm=ed25519
+ - --silent
+ - --watch-all-namespaces
+ - --verbose
+ # - name: dbg
+ # image: debian:12.2
+ # imagePullPolicy: IfNotPresent
+ # volumeMounts:
+ # - name: access-keys
+ # mountPath: /access-keys
+ # - name: known-hosts
+ # mountPath: /.ssh
+ # env:
+ # - name: SSH_KNOWN_HOSTS
+ # value: /.ssh/known_hosts
+ # command: [ "/bin/bash", "-c", "--" ]
+ # args: [ "while true; do sleep 30; done;" ]
+ restartPolicy: Never
diff --git a/charts/flux-bootstrap/templates/known-hosts.yaml b/charts/flux-bootstrap/templates/known-hosts.yaml
new file mode 100644
index 0000000..072c0d6
--- /dev/null
+++ b/charts/flux-bootstrap/templates/known-hosts.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: known-hosts
+ namespace: {{ .Release.Namespace }}
+binaryData:
+ known_hosts: {{ .Values.repositoryHostPublicKeys | b64enc }}
diff --git a/charts/flux-bootstrap/templates/service-account.yaml b/charts/flux-bootstrap/templates/service-account.yaml
new file mode 100644
index 0000000..a2168eb
--- /dev/null
+++ b/charts/flux-bootstrap/templates/service-account.yaml
@@ -0,0 +1,41 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: flux-bootstrap
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["*"]
+ - apiGroups: [""]
+ resources: ["*"]
+ verbs: ["*"]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["*"]
+ verbs: ["*"]
+ - apiGroups: ["apps"]
+ resources: ["*"]
+ verbs: ["*"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["*"]
+ verbs: ["*"]
+ - apiGroups: ["kustomize.toolkit.fluxcd.io"]
+ resources: ["*"]
+ verbs: ["*"]
+ - apiGroups: ["source.toolkit.fluxcd.io"]
+ resources: ["*"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: flux-bootstrap
+ namespace: {{ .Release.Namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: flux-bootstrap
+ apiGroup: rbac.authorization.k8s.io
diff --git a/charts/flux-bootstrap/values.yaml b/charts/flux-bootstrap/values.yaml
new file mode 100644
index 0000000..8221196
--- /dev/null
+++ b/charts/flux-bootstrap/values.yaml
@@ -0,0 +1,13 @@
+image:
+ repository: giolekva/flux
+ tag: latest
+ pullPolicy: Always
+repositoryAddress: ""
+repositoryHost: ""
+repositoryHostPublicKeys: ""
+repository:
+ address: ssh://git@<host>/<org>/<repository>
+ branch: master
+ path: /
+privateKey: ""
+installationNamespace: pcloud-flux
diff --git a/charts/fluxcd-reconciler/.helmignore b/charts/fluxcd-reconciler/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/fluxcd-reconciler/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/fluxcd-reconciler/Chart.yaml b/charts/fluxcd-reconciler/Chart.yaml
new file mode 100644
index 0000000..c504ef5
--- /dev/null
+++ b/charts/fluxcd-reconciler/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: fluxcd-reconciler
+description: A Helm chart for a service triggering fluxcd reconcilations
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/fluxcd-reconciler/templates/install.yaml b/charts/fluxcd-reconciler/templates/install.yaml
new file mode 100644
index 0000000..29ce6ef
--- /dev/null
+++ b/charts/fluxcd-reconciler/templates/install.yaml
@@ -0,0 +1,91 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: fluxcd-reconciler
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: fluxcd-reconciler
+rules: # TODO(gio): restrict to ns create and dnszone get
+- apiGroups:
+ - "source.toolkit.fluxcd.io"
+ resources:
+ - "gitrepositories"
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - "kustomize.toolkit.fluxcd.io"
+ resources:
+ - "kustomizations"
+ verbs:
+ - get
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: fluxcd-reconciler
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: fluxcd-reconciler
+subjects:
+- kind: ServiceAccount
+ name: fluxcd-reconciler
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: fluxcd-reconciler
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: fluxcd-reconciler
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: fluxcd-reconciler
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: fluxcd-reconciler
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: fluxcd-reconciler
+ spec:
+ serviceAccountName: fluxcd-reconciler
+ containers:
+ - name: fluxcd-reconciler
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command: ["/fluxcd_reconciler", "--port=8080"]
+ resources:
+ requests:
+ memory: "10Mi"
+ cpu: "10m"
+ limits:
+ memory: "20Mi"
+ cpu: "100m"
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
diff --git a/charts/fluxcd-reconciler/values.yaml b/charts/fluxcd-reconciler/values.yaml
new file mode 100644
index 0000000..cbf0bca
--- /dev/null
+++ b/charts/fluxcd-reconciler/values.yaml
@@ -0,0 +1,4 @@
+image:
+ repository: giolekva/fluxcd-reconciler
+ tag: latest
+ pullPolicy: Always
diff --git a/charts/gerrit-replica/.helmignore b/charts/gerrit-replica/.helmignore
new file mode 100644
index 0000000..4f4562f
--- /dev/null
+++ b/charts/gerrit-replica/.helmignore
@@ -0,0 +1,24 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+docs/
+supplements/
diff --git a/charts/gerrit-replica/Chart.yaml b/charts/gerrit-replica/Chart.yaml
new file mode 100644
index 0000000..0fc9a45
--- /dev/null
+++ b/charts/gerrit-replica/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+appVersion: 3.9.1
+description: |-
+ The Gerrit replica serves as a read-only Gerrit instance to serve repositories
+ that it receives from a Gerrit instance via replication. It can be used to
+ reduce the load on Gerrit instances.
+name: gerrit-replica
+version: 0.2.0
+maintainers:
+- name: Thomas Draebing
+ email: thomas.draebing@sap.com
+- name: Matthias Sohn
+ email: matthias.sohn@sap.com
+- name: Sasa Zivkov
+ email: sasa.zivkov@sap.com
+- name: Christian Halstrick
+ email: christian.halstrick@sap.com
+home: https://gerrit.googlesource.com/k8s-gerrit/+/master/helm-charts/gerrit-replica
+icon: http://commondatastorage.googleapis.com/gerrit-static/diffy-w200.png
+sources:
+- https://gerrit.googlesource.com/k8s-gerrit/+/master
+keywords:
+- gerrit
+- git
diff --git a/charts/gerrit-replica/LICENSE b/charts/gerrit-replica/LICENSE
new file mode 100644
index 0000000..028fc9f
--- /dev/null
+++ b/charts/gerrit-replica/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/charts/gerrit-replica/README.md b/charts/gerrit-replica/README.md
new file mode 100644
index 0000000..8b68b43
--- /dev/null
+++ b/charts/gerrit-replica/README.md
@@ -0,0 +1,547 @@
+# Gerrit replica on Kubernetes
+
+Gerrit is a web-based code review tool, which acts as a Git server. On large setups
+Gerrit servers can see a sizable amount of traffic from git operations performed by
+developers and build servers. The major part of requests are read-only requests
+(e.g. by `git fetch` operations). To take some load off the Gerrit server,
+Gerrit replicas can be deployed to serve read-only requests.
+
+This helm chart provides a Gerrit replica setup that can be deployed on Kubernetes.
+The Gerrit replica is capable of receiving replicated git repositories from a
+Gerrit. The Gerrit replica can then serve authenticated read-only requests.
+
+***note
+Gerrit versions before 3.0 are no longer supported, since the support of ReviewDB
+was removed.
+***
+
+## Prerequisites
+
+- Helm (>= version 3.0)
+
+ (Check out [this guide](https://docs.helm.sh/using_helm/#quickstart-guide)
+ how to install and use helm.)
+
+- Access to a provisioner for persistent volumes with `Read Write Many (RWM)`-
+ capability.
+
+ A list of applicable volume types can be found
+ [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).
+ This project was developed using the
+ [NFS-server-provisioner helm chart](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner),
+ a NFS-provisioner deployed in the Kubernetes cluster itself. Refer to
+ [this guide](/helm-charts/gerrit-replica/docs/nfs-provisioner.md) of how to
+ deploy it in context of this project.
+
+- A domain name that is configured to point to the IP address of the node running
+ the Ingress controller on the kubernetes cluster (as described
+ [here](http://alesnosek.com/blog/2017/02/14/accessing-kubernetes-pods-from-outside-of-the-cluster/)).
+
+- (Optional: Required, if SSL is configured)
+ A [Java keystore](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html#httpd.sslKeyStore)
+ to be used by Gerrit.
+
+## Installing the Chart
+
+***note
+**ATTENTION:** The value for `ingress.host` is required for rendering
+the chart's templates. The nature of the value does not allow defaults.
+Thus a custom `values.yaml`-file setting this value is required!
+***
+
+To install the chart with the release name `gerrit-replica`, execute:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit-replica \ # release name
+ ./gerrit-replica \ # path to chart
+ -f <path-to-custom-values>.yaml
+```
+
+The command deploys the Gerrit replica on the current Kubernetes cluster. The
+[configuration section](#Configuration) lists the parameters that can be
+configured during installation.
+
+The Gerrit replica requires the replicated `All-Projects.git`- and `All-Users.git`-
+repositories to be present in the `/var/gerrit/git`-directory. The `gerrit-init`-
+InitContainer will wait for this being the case. A way to do this is to access
+the Gerrit replica pod and to clone the repositories from the primary Gerrit (Make
+sure that you have the correct access rights to do so.):
+
+```sh
+kubectl exec -it <gerrit-replica-pod> -c gerrit-init ash
+gerrit@<gerrit-replica-pod>:/var/tools$ cd /var/gerrit/git
+gerrit@<gerrit-replica-pod>:/var/gerrit/git$ git clone "http://gerrit.com/All-Projects" --mirror
+Cloning into bare repository 'All-Projects.git'...
+gerrit@<gerrit-replica-pod>:/var/gerrit/git$ git clone "http://gerrit.com/All-Users" --mirror
+Cloning into bare repository 'All-Users.git'...
+```
+
+## Configuration
+
+The following sections list the configurable values in `values.yaml`. To configure
+a Gerrit replica setup, make a copy of the `values.yaml`-file and change the
+parameters as needed. The configuration can be applied by installing the chart as
+described [above](#Installing-the-chart).
+
+In addition, single options can be set without creating a custom `values.yaml`:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit-replica \ # release name
+ ./gerrit-replica \ # path to chart
+ --set=gitRepositoryStorage.size=100Gi,gitBackend.replicas=2
+```
+
+### Container images
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `images.busybox.registry` | The registry to pull the busybox container images from | `docker.io` |
+| `images.busybox.tag` | The busybox image tag to use | `latest` |
+| `images.registry.name` | The image registry to pull the container images from | `` |
+| `images.registry.ImagePullSecret.name` | Name of the ImagePullSecret | `image-pull-secret` (if empty no image pull secret will be deployed) |
+| `images.registry.ImagePullSecret.create` | Whether to create an ImagePullSecret | `false` |
+| `images.registry.ImagePullSecret.username` | The image registry username | `nil` |
+| `images.registry.ImagePullSecret.password` | The image registry password | `nil` |
+| `images.version` | The image version (image tag) to use | `latest` |
+| `images.imagePullPolicy` | Image pull policy | `Always` |
+| `images.additionalImagePullSecrets` | Additional image pull policies that pods should use | `[]` |
+
+### Labels
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `additionalLabels` | Additional labels for resources managed by this Helm chart | `{}` |
+
+### Storage classes
+
+For information of how a `StorageClass` is configured in Kubernetes, read the
+[official Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#introduction).
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `storageClasses.default.name` | The name of the default StorageClass (RWO) | `default` |
+| `storageClasses.default.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.default.provisioner` | Provisioner of the StorageClass | `kubernetes.io/aws-ebs` |
+| `storageClasses.default.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.default.parameters` | Parameters for the provisioner | `parameters.type: gp2`, `parameters.fsType: ext4` |
+| `storageClasses.default.mountOptions` | The mount options of the default StorageClass | `[]` |
+| `storageClasses.default.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+| `storageClasses.shared.name` | The name of the shared StorageClass (RWM) | `shared-storage` |
+| `storageClasses.shared.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.shared.provisioner` | Provisioner of the StorageClass | `nfs` |
+| `storageClasses.shared.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.shared.parameters` | Parameters for the provisioner | `parameters.mountOptions: vers=4.1` |
+| `storageClasses.shared.mountOptions` | The mount options of the shared StorageClass | `[]` |
+| `storageClasses.shared.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+
+### CA certificate
+
+Some application may require TLS verification. If the default CA built into the
+containers is not enough a custom CA certificate can be given to the deployment.
+Note, that Gerrit will require its CA in a JKS keystore, which is described below.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `caCert` | CA certificate for TLS verification (if not set, the default will be used) | `None` |
+
+### Workaround for NFS
+
+Kubernetes will not always be able to adapt the ownership of the files within NFS
+volumes. Thus, a workaround exists that will add init-containers to
+adapt file ownership. Note, that only the ownership of the root directory of the
+volume will be changed. All data contained within will be expected to already be
+owned by the user used by Gerrit. Also the ID-domain will be configured to ensure
+correct ID-mapping.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `nfsWorkaround.enabled` | Whether the volume used is an NFS-volume | `false` |
+| `nfsWorkaround.chownOnStartup` | Whether to chown the volume on pod startup | `false` |
+| `nfsWorkaround.idDomain` | The ID-domain that should be used to map user-/group-IDs for the NFS mount | `localdomain.com` |
+
+### Network policies
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `networkPolicies.enabled` | Whether to enable preconfigured NetworkPolicies | `false` |
+| `networkPolicies.dnsPorts` | List of ports used by DNS-service (e.g. KubeDNS) | `[53, 8053]` |
+
+The NetworkPolicies provided here are quite strict and do not account for all
+possible scenarios. Thus, custom NetworkPolicies have to be added, e.g. for
+connecting to a database. On the other hand some defaults may be not restrictive
+enough. By default, the ingress traffic of the git-backend pod is not restricted.
+Thus, every source (with the right credentials) could push to the git-backend.
+To add an additional layer of security, the ingress rule could be defined more
+fine-grained. The chart provides the possibility to define custom rules for ingress-
+traffic of the git-backend pod under `gitBackend.networkPolicy.ingress`.
+Depending on the scenario, there are different ways to restrict the incoming
+connections.
+
+If the replicator (e.g. Gerrit) is running in a pod on the same cluster,
+a podSelector (and namespaceSelector, if the pod is running in a different
+namespace) can be used to whitelist the traffic:
+
+```yaml
+gitBackend:
+ networkPolicy:
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app: gerrit
+```
+
+If the replicator is outside the cluster, the IP of the replicator can also be
+whitelisted, e.g.:
+
+```yaml
+gitBackend:
+ networkPolicy:
+ ingress:
+ - from:
+ - ipBlock:
+ cidr: xxx.xxx.0.0/16
+```
+
+The same principle also applies to other use cases, e.g. connecting to a database.
+For more information about the NetworkPolicy resource refer to the
+[Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
+
+### Storage for Git repositories
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitRepositoryStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `gitRepositoryStorage.externalPVC.name` | Name of the external PVC | `git-repositories-pvc` |
+| `gitRepositoryStorage.size` | Size of the volume storing the Git repositories | `5Gi` |
+
+If the git repositories should be persisted even if the chart is deleted and in
+a way that the volume containing them can be mounted by the reinstalled chart,
+the PVC claiming the volume has to be created independently of the chart. To use
+the external PVC, set `gitRepositoryStorage.externalPVC.use` to `true` and
+give the name of the PVC under `gitRepositoryStorage.externalPVC.name`.
+
+### Storage for Logs
+
+In addition to collecting logs with a log collection tool like Promtail, the logs
+can also be stored in a persistent volume. This volume has to be a read-write-many
+volume to be able to be used by multiple pods.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `logStorage.enabled` | Whether to enable persistence of logs | `false` |
+| `logStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `logStorage.externalPVC.name` | Name of the external PVC | `gerrit-logs-pvc` |
+| `logStorage.size` | Size of the volume | `5Gi` |
+| `logStorage.cleanup.enabled` | Whether to regularly delete old logs | `false` |
+| `logStorage.cleanup.schedule` | Cron schedule defining when to run the cleanup job | `0 0 * * *` |
+| `logStorage.cleanup.retentionDays` | Number of days to retain the logs | `14` |
+| `logStorage.cleanup.resources` | Resources the container is allowed to use | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `logStorage.cleanup.additionalPodLabels` | Additional labels for pods | `{}` |
+
+Each pod will create a separate folder for its logs, allowing to trace logs to
+the respective pods.
+
+### Istio
+
+Istio can be used as an alternative to Kubernetes Ingresses to manage the traffic
+into the cluster and also inside the cluster. This requires istio to be installed
+beforehand. Some guidance on how to set up istio can be found [here](/Documentation/istio.md).
+The helm chart expects `istio-injection` to be enabled in the namespace, in which
+it will be installed.
+
+In the case istio is used, all configuration for ingresses in the chart will be
+ignored.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `istio.enabled` | Whether istio should be used (requires istio to be installed) | `false` |
+| `istio.host` | Hostname (CNAME must point to istio ingress gateway loadbalancer service) | `nil` |
+| `istio.tls.enabled` | Whether to enable TLS | `false` |
+| `istio.tls.secret.create` | Whether to create TLS certificate secret | `true` |
+| `istio.tls.secret.name` | Name of external secret containing TLS certificates | `nil` |
+| `istio.tls.cert` | TLS certificate | `-----BEGIN CERTIFICATE-----` |
+| `istio.tls.key` | TLS key | `-----BEGIN RSA PRIVATE KEY-----` |
+| `istio.ssh.enabled` | Whether to enable SSH | `false` |
+
+### Ingress
+
+As an alternative to istio the Nginx Ingress controller can be used to manage
+ingress traffic.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `ingress.enabled` | Whether to deploy an Ingress | `false` |
+| `ingress.host` | Host name to use for the Ingress (required for Ingress) | `nil` |
+| `ingress.maxBodySize` | Maximum request body size allowed (Set to 0 for an unlimited request body size) | `50m` |
+| `ingress.additionalAnnotations` | Additional annotations for the Ingress | `nil` |
+| `ingress.tls.enabled` | Whether to enable TLS termination in the Ingress | `false` |
+| `ingress.tls.secret.create` | Whether to create a TLS-secret | `true` |
+| `ingress.tls.secret.name` | Name of an external secret that will be used as a TLS-secret | `nil` |
+| `ingress.tls.cert` | Public SSL server certificate | `-----BEGIN CERTIFICATE-----` |
+| `ingress.tls.key` | Private SSL server certificate | `-----BEGIN RSA PRIVATE KEY-----` |
+
+***note
+For graceful shutdown to work with an ingress, the ingress controller has to be
+configured to gracefully close the connections as well.
+***
+
+### Promtail Sidecar
+
+To collect Gerrit logs, a Promtail sidecar can be deployed into the Gerrit replica
+pods. This can for example be used together with the [gerrit-monitoring](https://gerrit-review.googlesource.com/admin/repos/gerrit-monitoring)
+project.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `promtailSidecar.enabled` | Whether to install the Promtail sidecar container | `false` |
+| `promtailSidecar.image` | The promtail container image to use | `grafana/promtail` |
+| `promtailSidecar.version` | The promtail container image version | `1.3.0` |
+| `promtailSidecar.resources` | Configure the amount of resources the container requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 128Mi` |
+| | | `limits.cpu: 200m` |
+| | | `limits.memory: 128Mi` |
+| `promtailSidecar.tls.skipverify` | Whether to skip TLS verification | `true` |
+| `promtailSidecar.tls.caCert` | CA certificate for TLS verification | `-----BEGIN CERTIFICATE-----` |
+| `promtailSidecar.loki.url` | URL to reach Loki | `loki.example.com` |
+| `promtailSidecar.loki.user` | Loki user | `admin` |
+| `promtailSidecar.loki.password` | Loki password | `secret` |
+
+
+### Apache-Git-HTTP-Backend (Git-Backend)
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitBackend.image` | Image name of the Apache-git-http-backend container image | `k8sgerrit/apache-git-http-backend` |
+| `gitBackend.additionalPodLabels` | Additional labels for Pods | `{}` |
+| `gitBackend.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gitBackend.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains. For more information, please refer to the following documents. [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints) | {} |
+| `gitBackend.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitBackend.affinity` | Assigns a Pod to the specified Nodes | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight: 100 |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: "topology.kubernetes.io/zone" |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key: app |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator: In |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0]: git-backend |
+| `gitBackend.replicas` | Number of pod replicas to deploy | `1` |
+| `gitBackend.maxSurge` | Max. percentage or number of pods allowed to be scheduled above the desired number | `25%` |
+| `gitBackend.maxUnavailable` | Max. percentage or number of pods allowed to be unavailable at a time | `100%` |
+| `gitBackend.networkPolicy.ingress` | Custom ingress-network policy for git-backend pods | `[{}]` (allow all) |
+| `gitBackend.networkPolicy.egress` | Custom egress-network policy for git-backend pods | `nil` |
+| `gitBackend.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `gitBackend.livenessProbe` | Configuration of the liveness probe timings | `{initialDelaySeconds: 10, periodSeconds: 5}` |
+| `gitBackend.readinessProbe` | Configuration of the readiness probe timings | `{initialDelaySeconds: 5, periodSeconds: 1}` |
+| `gitBackend.credentials.htpasswd` | `.htpasswd`-file containing username/password-credentials for accessing git | `git:$apr1$O/LbLKC7$Q60GWE7OcqSEMSfe/K8xU.` (user: git, password: secret) |
+| `gitBackend.service.additionalAnnotations` | Additional annotations for the Service | `{}` |
+| `gitBackend.service.loadBalancerSourceRanges` | The list of allowed IPs for the Service | `[]` |
+| `gitBackend.service.type` | Which kind of Service to deploy | `LoadBalancer` |
+| `gitBackend.service.externalTrafficPolicy` | Specify how traffic from external is handled | `Cluster` |
+| `gitBackend.service.http.enabled` | Whether to serve HTTP-requests (needed for Ingress) | `true` |
+| `gitBackend.service.http.port` | Port over which to expose HTTP | `80` |
+| `gitBackend.service.https.enabled` | Whether to serve HTTPS-requests | `false` |
+| `gitBackend.service.https.port` | Port over which to expose HTTPS | `443` |
+
+***note
+At least one endpoint (HTTP and/or HTTPS) has to be enabled in the service!
+***
+
+Project creation, project deletion and HEAD update can also be replicated. To enable
+this feature configure the replication plugin to use an adminUrl using the format
+`gerrit+https://<apache-git-http-backend host>`.
+
+### Git garbage collection
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitGC.image` | Image name of the Git-GC container image | `k8sgerrit/git-gc` |
+| `gitGC.schedule` | Cron-formatted schedule with which to run Git garbage collection | `0 6,18 * * *` |
+| `gitGC.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `gitGC.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gitGC.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.affinity` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.additionalPodLabels` | Additional labels for Pods | `{}` |
+
+### Gerrit replica
+
+***note
+The way the Jetty servlet used by Gerrit works, the Gerrit replica component of the
+gerrit-replica chart actually requires the URL to be known, when the chart is installed.
+The suggested way to do that is to use the provided Ingress resource. This requires
+that a URL is available and that the DNS is configured to point the URL to the
+IP of the node the Ingress controller is running on!
+***
+
+***note
+Setting the canonical web URL in the gerrit.config to the host used for the Ingress
+is mandatory, if access to the Gerrit replica is required!
+***
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gerritReplica.images.gerritInit` | Image name of the Gerrit init container image | `k8sgerrit/gerrit-init` |
+| `gerritReplica.images.gerritReplica` | Image name of the Gerrit replica container image | `k8sgerrit/gerrit` |
+| `gerritReplica.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gerritReplica.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains. For more information, please refer to the following documents. [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints) | {} |
+| `gerritReplica.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gerritReplica.affinity` | Assigns a Pod to the specified Nodes. By default, gerrit-replica is evenly distributed on `topology.kubernetes.io/zone`. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight: 100 |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: "topology.kubernetes.io/zone" |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key: app |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator: In |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0]: gerrit-replica |
+| `gerritReplica.replicas` | Number of pod replicas to deploy | `1` |
+| `gerritReplica.additionalAnnotations` | Additional annotations for the Pods | {} |
+| `gerritReplica.additionalPodLabels` | Additional labels for the Pods | `{}` |
+| `gerritReplica.maxSurge` | Max. percentage or number of pods allowed to be scheduled above the desired number | `25%` |
+| `gerritReplica.maxUnavailable` | Max. percentage or number of pods allowed to be unavailable at a time | `100%` |
+| `gerritReplica.livenessProbe` | Configuration of the liveness probe timings | `{initialDelaySeconds: 60, periodSeconds: 5}` |
+| `gerritReplica.probeScheme` | Scheme for probes, for example HTTPS | `nil` |
+| `gerritReplica.readinessProbe` | Configuration of the readiness probe timings | `{initialDelaySeconds: 10, periodSeconds: 10}` |
+| `gerritReplica.startupProbe` | Configuration of the startup probe timings | `{initialDelaySeconds: 10, periodSeconds: 5}` |
+| `gerritReplica.gracefulStopTimeout` | Time in seconds Kubernetes will wait until killing the pod during termination (has to be longer then Gerrit's httpd.gracefulStopTimeout to allow graceful shutdown of Gerrit) | `90` |
+| `gerritReplica.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 1` |
+| | | `requests.memory: 5Gi` |
+| | | `limits.cpu: 1` |
+| | | `limits.memory: 6Gi` |
+| `gerritReplica.networkPolicy.ingress` | Custom ingress-network policy for gerrit-replica pods | `nil` |
+| `gerritReplica.networkPolicy.egress` | Custom egress-network policy for gerrit-replica pods | `nil` |
+| `gerritReplica.service.additionalAnnotations` | Additional annotations for the Service | `{}` |
+| `gerritReplica.service.loadBalancerSourceRanges` | The list of allowed IPs for the Service | `[]` |
+| `gerritReplica.service.type` | Which kind of Service to deploy | `NodePort` |
+| `gerritReplica.service.externalTrafficPolicy` | Specify how traffic from external is handled | `Cluster` |
+| `gerritReplica.service.http.port` | Port over which to expose HTTP | `80` |
+| `gerritReplica.service.ssh.enabled` | Whether to enable SSH for the Gerrit replica | `false` |
+| `gerritReplica.service.ssh.port` | Port for SSH | `29418` |
+| `gerritReplica.keystore` | base64-encoded Java keystore (`cat keystore.jks \| base64`) to be used by Gerrit, when using SSL | `nil` |
+| `gerritReplica.pluginManagement.plugins` | List of Gerrit plugins to install | `[]` |
+| `gerritReplica.pluginManagement.plugins[0].name` | Name of plugin | `nil` |
+| `gerritReplica.pluginManagement.plugins[0].url` | Download url of plugin. If given the plugin will be downloaded, otherwise it will be installed from the gerrit.war-file. | `nil` |
+| `gerritReplica.pluginManagement.plugins[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version (optional) | `nil` |
+| `gerritReplica.pluginManagement.plugins[0].installAsLibrary` | Whether the plugin should be symlinked to the lib-dir in the Gerrit site. | `nil` |
+| `gerritReplica.pluginManagement.libs` | List of Gerrit library modules to install | `[]` |
+| `gerritReplica.pluginManagement.libs[0].name` | Name of the lib module | `nil` |
+| `gerritReplica.pluginManagement.libs[0].url` | Download url of lib module. | `nil` |
+| `gerritReplica.pluginManagement.libs[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version | `nil` |
+| `gerritReplica.pluginManagement.cache.enabled` | Whether to cache downloaded plugins | `false` |
+| `gerritReplica.pluginManagement.cache.size` | Size of the volume used to store cached plugins | `1Gi` |
+| `gerritReplica.priorityClassName` | Name of the PriorityClass to apply to replica pods | `nil` |
+| `gerritReplica.etc.config` | Map of config files (e.g. `gerrit.config`) that will be mounted to `$GERRIT_SITE/etc` by a ConfigMap | `{gerrit.config: ..., replication.config: ...}` [see here](#Gerrit-config-files) |
+| `gerritReplica.etc.secret` | Map of config files (e.g. `secure.config`) that will be mounted to `$GERRIT_SITE/etc` by a Secret | `{secure.config: ...}` [see here](#Gerrit-config-files) |
+| `gerritReplica.additionalConfigMaps` | Allows to mount additional ConfigMaps into a subdirectory of `$SITE/data` | `[]` |
+| `gerritReplica.additionalConfigMaps[*].name` | Name of the ConfigMap | `nil` |
+| `gerritReplica.additionalConfigMaps[*].subDir` | Subdirectory under `$SITE/data` into which the files should be symlinked | `nil` |
+| `gerritReplica.additionalConfigMaps[*].data` | Data of the ConfigMap. If not set, ConfigMap has to be created manually | `nil` |
+
+### Gerrit config files
+
+The gerrit-replica chart provides a ConfigMap containing the configuration files
+used by Gerrit, e.g. `gerrit.config` and a Secret containing sensitive configuration
+like the `secure.config` to configure the Gerrit installation in the Gerrit
+component. The content of the config files can be set in the `values.yaml` under
+the keys `gerritReplica.etc.config` and `gerritReplica.etc.secret` respectively.
+The key has to be the filename (e.g. `gerrit.config`) and the file's contents
+the value. This way an arbitrary number of configuration files can be loaded into
+the `$GERRIT_SITE/etc`-directory, e.g. for plugins.
+All configuration options for Gerrit are described in detail in the
+[official documentation of Gerrit](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html).
+Some options however have to be set in a specified way for Gerrit to work as
+intended with the chart:
+
+- `gerrit.basePath`
+
+ Path to the directory containing the repositories. The chart mounts this
+ directory from a persistent volume to `/var/gerrit/git` in the container. For
+ Gerrit to find the correct directory, this has to be set to `git`.
+
+- `gerrit.serverId`
+
+ In Gerrit versions higher than 2.14 Gerrit needs a server ID, which is used by
+ NoteDB. Gerrit would usually generate a random ID on startup, but since the
+ gerrit.config file is read only, when mounted as a ConfigMap this fails.
+ Thus the server ID has to be set manually!
+
+- `gerrit.canonicalWebUrl`
+
+ The canonical web URL has to be set to the Ingress host.
+
+- `httpd.listenURL`
+
+ This has to be set to `proxy-http://*:8080/` or `proxy-https://*:8080/`,
+ depending on whether TLS is enabled in the Ingress or not, otherwise the Jetty
+ servlet will run into an endless redirect loop.
+
+- `httpd.gracefulStopTimeout` / `sshd.gracefulStopTimeout`
+
+ To enable graceful shutdown of the embedded jetty server and SSHD, a timeout
+ has to be set with this option. This will be the maximum time, Gerrit will wait
+ for HTTP requests to finish before shutdown.
+
+- `container.user`
+
+ The technical user in the Gerrit replica container is called `gerrit`. Thus, this
+ value is required to be `gerrit`.
+
+- `container.replica`
+
+ Since this chart is meant to install a Gerrit replica, this naturally has to be
+ `true`.
+
+- `container.javaHome`
+
+ This has to be set to `/usr/lib/jvm/java-11-openjdk-amd64`, since this is
+ the path of the Java installation in the container.
+
+- `container.javaOptions`
+
+ The maximum heap size has to be set. And its value has to be lower than the
+ memory resource limit set for the container (e.g. `-Xmx4g`). In your calculation
+ allow memory for other components running in the container.
+
+To enable liveness- and readiness probes, the healthcheck plugin will be installed
+by default. Note, that by configuring to use a packaged or downloaded version of
+the healthcheck plugin, the configured version will take precedence over the default
+version. The plugin is by default configured to disable the `querychanges` and
+`auth` healthchecks, since the Gerrit replica does not index changes and a new
+Gerrit server will not yet necessarily have a user to validate authentication.
+
+The default configuration can be overwritten by adding the `healthcheck.config`
+file as a key-value pair to `gerritReplica.etc.config` as for every other configuration.
+
+SSH keys should be configured via the helm-chart using the `gerritReplica.etc.secret`
+map. Gerrit will create its own keys, if none are present in the site, but if
+multiple Gerrit pods are running, each Gerrit instance would have its own keys.
+Users accessing Gerrit via a load balancer would get issues due to changing
+host keys.
+
+## Upgrading the Chart
+
+To upgrade an existing installation of the gerrit-replica chart, e.g. to install
+a newer chart version or to use an updated custom `values.yaml`-file, execute
+the following command:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm upgrade \
+ <release-name> \
+ ./gerrit-replica \ # path to chart
+ -f <path-to-custom-values>.yaml
+```
+
+## Uninstalling the Chart
+
+To delete the chart from the cluster, use:
+
+```sh
+helm delete <release-name>
+```
diff --git a/charts/gerrit-replica/docs/nfs-provisioner.md b/charts/gerrit-replica/docs/nfs-provisioner.md
new file mode 100644
index 0000000..e2d0806
--- /dev/null
+++ b/charts/gerrit-replica/docs/nfs-provisioner.md
@@ -0,0 +1,64 @@
+# Installing a NFS-provisioner
+
+The Gerrit replica requires access to a persistent volume capable of running in
+`Read Write Many (RWM)`-mode to store the git repositories, since the repositories
+have to be accessed by multiple pods. One possibility to provide such volumes
+is to install a provisioner for NFS-volumes into the same Kubernetes-cluster.
+This document will guide through the process.
+
+The [Kubernetes external-storage project](https://github.com/kubernetes-incubator/external-storage)
+provides an out-of-tree dynamic [provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs)
+for NFS volumes. A chart exists for easy deployment of the project onto a
+Kubernetes cluster. The chart's sources can be found [here](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner).
+
+## Prerequisites
+
+This guide will use Helm to install the NFS-provisioner. Thus, Helm has to be
+installed.
+
+## Installing the nfs-server-provisioner chart
+
+A custom `values.yaml`-file containing a configuration tested with the
+gerrit-replica chart can be found in the `supplements/nfs`-directory in the
+gerrit-replica chart's root directory. In addition a file stating the tested
+version of the nfs-server-provisioner chart is present in the same directory.
+
+If needed, adapt the `values.yaml`-file for the nfs-server-provisioner chart
+further and then run:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts/gerrit-replica/supplements/nfs
+helm install nfs \
+ stable/nfs-server-provisioner \
+ -f values.yaml \
+ --version $(cat VERSION)
+```
+
+For a description of the configuration options, refer to the
+[chart's documentation](https://github.com/helm/charts/blob/master/stable/nfs-server-provisioner/README.md).
+
+Here are some tips for configuring the nfs-server-provisioner chart to work with
+the gerrit-replica chart:
+
+- Deploying more than 1 `replica` led to some reliability issues in tests and
+ should only be used after further testing.
+- The name of the StorageClass created for NFS-volumes has to be the same as the
+ one defined in the gerrit-replica chart for `storageClasses.shared.name`
+- The StorageClass for NFS-volumes needs to have the parameter `mountOptions: vers=4.1`,
+ due to compatibility [issues](https://github.com/kubernetes-incubator/external-storage/issues/223)
+ with Ganesha.
+
+## Deleting the nfs-server-provisioner chart
+
+***note
+**Attention:** Never delete the nfs-server-provisioner chart, if there is still a
+PersistentVolumeClaim and Pods using a NFS-volume provisioned by the NFS server
+provisioner. This will lead to crashed pods, that will not be terminated correctly.
+***
+
+If no Pod or PVC is using a NFS-volume provisioned by the NFS server provisioner
+anymore, delete it like any other chart:
+
+```sh
+helm delete nfs
+```
diff --git a/charts/gerrit-replica/supplements/nfs/VERSION b/charts/gerrit-replica/supplements/nfs/VERSION
new file mode 100644
index 0000000..7dff5b8
--- /dev/null
+++ b/charts/gerrit-replica/supplements/nfs/VERSION
@@ -0,0 +1 @@
+0.2.1
\ No newline at end of file
diff --git a/charts/gerrit-replica/supplements/nfs/values.yaml b/charts/gerrit-replica/supplements/nfs/values.yaml
new file mode 100644
index 0000000..aa3d9ce
--- /dev/null
+++ b/charts/gerrit-replica/supplements/nfs/values.yaml
@@ -0,0 +1,42 @@
+# Deploying more than 1 `replica` led to some reliability issues in tests and
+# should only be used after further testing.
+replicaCount: 1
+
+image:
+ repository: quay.io/kubernetes_incubator/nfs-provisioner
+ tag: v1.0.9
+ pullPolicy: IfNotPresent
+
+service:
+ type: ClusterIP
+ nfsPort: 2049
+ mountdPort: 20048
+ rpcbindPort: 51413
+
+persistence:
+ enabled: true
+ storageClass: default
+ accessMode: ReadWriteOnce
+ size: 7.5Gi
+
+storageClass:
+ create: true
+ defaultClass: false
+ # The name of the StorageClass has to be the same as the one defined in the
+ # gerrit-replica chart for `storageClasses.shared.name`
+ name: shared-storage
+ parameters:
+ # Required!
+ mountOptions: vers=4.1
+ reclaimPolicy: Delete
+
+rbac:
+ create: true
+
+resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
diff --git a/charts/gerrit-replica/templates/NOTES.txt b/charts/gerrit-replica/templates/NOTES.txt
new file mode 100644
index 0000000..30e263f
--- /dev/null
+++ b/charts/gerrit-replica/templates/NOTES.txt
@@ -0,0 +1,35 @@
+A Gerrit replica has been deployed.
+=================================
+
+The Apache-Git-HTTP-Backend is now ready to receive replication requests from the
+primary Gerrit. Please configure the replication plugin of the primary Gerrit to
+push the repositories to:
+
+{{ if .Values.istio.enabled -}}
+ http {{- if .Values.istio.tls.enabled -}} s {{- end -}} :// {{- .Values.istio.host -}} /${name}.git
+{{ else if .Values.ingress.enabled -}}
+ http {{- if .Values.ingress.tls.enabled -}} s {{- end -}} :// {{- .Values.ingress.host -}} /${name}.git
+{{- else }}
+ http://<EXTERNAL-IP>: {{- .Values.gitBackend.service.http.port -}} /${name}.git
+ The external IP of the service can be found by running:
+ kubectl get svc git-backend-service
+{{- end }}
+
+Project creation, project deletion and HEAD update can also be replicated. To enable
+this feature configure the replication plugin to use an adminUrl using the format
+`gerrit+http {{- if .Values.ingress.tls.enabled -}} s {{- end -}} :// {{- .Values.ingress.host -}}`.
+
+A detailed guide of how to configure Gerrit's replication plugin can be found here:
+
+https://gerrit.googlesource.com/plugins/replication/+doc/master/src/main/resources/Documentation/config.md
+
+The Gerrit replica is starting up.
+
+The initialization process may take some time. Afterwards the git repositories
+will be available under:
+
+{{ if .Values.istio.enabled -}}
+ http {{- if .Values.istio.tls.enabled -}} s {{- end -}} :// {{- .Values.istio.host -}} /<repository-name>.git
+{{- else }}
+ http {{- if .Values.ingress.tls.enabled -}} s {{- end -}} :// {{- .Values.ingress.host -}} /<repository-name>.git
+{{- end }}
diff --git a/charts/gerrit-replica/templates/_helpers.tpl b/charts/gerrit-replica/templates/_helpers.tpl
new file mode 100644
index 0000000..500d58c
--- /dev/null
+++ b/charts/gerrit-replica/templates/_helpers.tpl
@@ -0,0 +1,20 @@
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "gerrit-replica.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create secret to access docker registry
+*/}}
+{{- define "imagePullSecret" }}
+{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.images.registry.name (printf "%s:%s" .Values.images.registry.ImagePullSecret.username .Values.images.registry.ImagePullSecret.password | b64enc) | b64enc }}
+{{- end }}
+
+{{/*
+Add '/' to registry if needed.
+*/}}
+{{- define "registry" -}}
+{{ if .Values.images.registry.name }}{{- printf "%s/" .Values.images.registry.name -}}{{end}}
+{{- end -}}
diff --git a/charts/gerrit-replica/templates/gerrit-replica.configmap.yaml b/charts/gerrit-replica/templates/gerrit-replica.configmap.yaml
new file mode 100644
index 0000000..1aa9496
--- /dev/null
+++ b/charts/gerrit-replica/templates/gerrit-replica.configmap.yaml
@@ -0,0 +1,78 @@
+{{- $root := . -}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{- range $key, $value := .Values.gerritReplica.etc.config }}
+ {{ $key }}:
+{{ toYaml $value | indent 4 }}
+ {{- end }}
+ {{- if not (hasKey .Values.gerritReplica.etc.config "healthcheck.config") }}
+ healthcheck.config: |-
+ [healthcheck "auth"]
+ # On new instances there may be no users to use for healthchecks
+ enabled = false
+ [healthcheck "querychanges"]
+ # On new instances there won't be any changes to query
+ enabled = false
+ {{- end }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ gerrit-init.yaml: |-
+ {{ if .Values.caCert -}}
+ caCertPath: /var/config/ca.crt
+ {{- end }}
+ pluginCacheEnabled: {{ .Values.gerritReplica.pluginManagement.cache.enabled }}
+ pluginCacheDir: /var/mnt/plugins
+ {{- if .Values.gerritReplica.pluginManagement.plugins }}
+ plugins:
+{{ toYaml .Values.gerritReplica.pluginManagement.plugins | indent 6}}
+ {{- end }}
+ {{- if .Values.gerritReplica.pluginManagement.libs }}
+ libs:
+{{ toYaml .Values.gerritReplica.pluginManagement.libs | indent 6}}
+ {{- end }}
+{{- range .Values.gerritReplica.additionalConfigMaps -}}
+{{- if .data }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $root.Release.Name }}-{{ .name }}
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ $root.Release.Name }}
+ chart: {{ template "gerrit-replica.chart" $root }}
+ heritage: {{ $root.Release.Service }}
+ release: {{ $root.Release.Name }}
+ {{- if $root.Values.additionalLabels }}
+{{ toYaml $root.Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+{{ toYaml .data | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/gerrit-replica/templates/gerrit-replica.secrets.yaml b/charts/gerrit-replica/templates/gerrit-replica.secrets.yaml
new file mode 100644
index 0000000..ece9b9a
--- /dev/null
+++ b/charts/gerrit-replica/templates/gerrit-replica.secrets.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-secure-config
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{ if .Values.gerritReplica.keystore -}}
+ keystore: {{ .Values.gerritReplica.keystore }}
+ {{- end }}
+ {{- range $key, $value := .Values.gerritReplica.etc.secret }}
+ {{ $key }}: {{ $value | b64enc }}
+ {{- end }}
+type: Opaque
diff --git a/charts/gerrit-replica/templates/gerrit-replica.service.yaml b/charts/gerrit-replica/templates/gerrit-replica.service.yaml
new file mode 100644
index 0000000..01030b4
--- /dev/null
+++ b/charts/gerrit-replica/templates/gerrit-replica.service.yaml
@@ -0,0 +1,40 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-service
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.gerritReplica.service.additionalAnnotations }}
+ annotations:
+{{ toYaml .Values.gerritReplica.service.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ with .Values.gerritReplica.service }}
+ {{- if .loadBalancerSourceRanges -}}
+ loadBalancerSourceRanges:
+{{- range .loadBalancerSourceRanges }}
+ - {{ . | quote }}
+{{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .http.port }}
+ targetPort: 8080
+ {{ if .ssh.enabled -}}
+ - name: ssh
+ port: {{ .ssh.port }}
+ targetPort: 29418
+ {{- end }}
+ type: {{ .type }}
+ externalTrafficPolicy: {{ .externalTrafficPolicy }}
+ {{- end }}
+ selector:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/gerrit-replica/templates/gerrit-replica.stateful-set.yaml b/charts/gerrit-replica/templates/gerrit-replica.stateful-set.yaml
new file mode 100644
index 0000000..8493c7a
--- /dev/null
+++ b/charts/gerrit-replica/templates/gerrit-replica.stateful-set.yaml
@@ -0,0 +1,346 @@
+{{- $root := . -}}
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-statefulset
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ serviceName: {{ .Release.Name }}-gerrit-replica-service
+ replicas: {{ .Values.gerritReplica.replicas }}
+ updateStrategy:
+ rollingUpdate:
+ partition: {{ .Values.gerritReplica.updatePartition }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ {{- if .Values.gerritReplica.additionalPodLabels }}
+{{ toYaml .Values.gerritReplica.additionalPodLabels | indent 8 }}
+ {{- end }}
+ annotations:
+ chartRevision: "{{ .Release.Revision }}"
+ {{- if .Values.gerritReplica.additionalAnnotations }}
+{{ toYaml .Values.gerritReplica.additionalAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.gerritReplica.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.priorityClassName }}
+ priorityClassName: {{ . }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ .Values.gerritReplica.gracefulStopTimeout }}
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ - name: gerrit-init
+ image: {{ template "registry" . }}{{ .Values.gerritReplica.images.gerritInit }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: gerrit-init-config
+ mountPath: "/var/config/gerrit-init.yaml"
+ subPath: gerrit-init.yaml
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- if .Values.gerritReplica.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ mountPath: "/var/mnt/plugins"
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-replica-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ subPath: ca.crt
+ mountPath: "/var/config/ca.crt"
+ {{- end }}
+ {{- range .Values.gerritReplica.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ containers:
+ - name: gerrit-replica
+ image: {{ template "registry" . }}{{ .Values.gerritReplica.images.gerritReplica }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/ash"
+ - "-c"
+ - "kill -2 $(pidof java) && tail --pid=$(pidof java) -f /dev/null"
+ ports:
+ - name: http
+ containerPort: 8080
+ {{ if .Values.gerritReplica.service.ssh.enabled -}}
+ - name: ssh
+ containerPort: 29418
+ {{- end }}
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-replica-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{- range .Values.gerritReplica.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: http
+{{- if .Values.gerritReplica.probeScheme }}
+ scheme: {{ .Values.gerritReplica.probeScheme }}
+{{- end }}
+{{ toYaml .Values.gerritReplica.livenessProbe | indent 10 }}
+ readinessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: http
+{{- if .Values.gerritReplica.probeScheme }}
+ scheme: {{ .Values.gerritReplica.probeScheme }}
+{{- end }}
+{{ toYaml .Values.gerritReplica.readinessProbe | indent 10 }}
+ startupProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: http
+{{- if .Values.gerritReplica.probeScheme }}
+ scheme: {{ .Values.gerritReplica.probeScheme }}
+{{- end }}
+{{ toYaml .Values.gerritReplica.startupProbe | indent 10 }}
+ resources:
+{{ toYaml .Values.gerritReplica.resources | indent 10 }}
+ {{ if .Values.istio.enabled -}}
+ - name: istio-proxy
+ image: auto
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l | xargs) -ne 0 ]; do sleep 1; done"
+ {{- end }}
+ {{ if .Values.promtailSidecar.enabled -}}
+ - name: promtail
+ image: {{ .Values.promtailSidecar.image }}:v{{ .Values.promtailSidecar.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ command:
+ - sh
+ - -ec
+ args:
+ - |-
+ /usr/bin/promtail \
+ -config.file=/etc/promtail/promtail.yaml \
+ -client.url={{ .Values.promtailSidecar.loki.url }}/loki/api/v1/push \
+ -client.external-labels=instance=$HOSTNAME
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ resources:
+{{ toYaml .Values.promtailSidecar.resources | indent 10 }}
+ volumeMounts:
+ - name: promtail-config
+ mountPath: /etc/promtail/promtail.yaml
+ subPath: promtail.yaml
+ - name: promtail-secret
+ mountPath: /etc/promtail/promtail.secret
+ subPath: promtail.secret
+ {{- if not .Values.promtailSidecar.tls.skipVerify }}
+ - name: tls-ca
+ mountPath: /etc/promtail/promtail.ca.crt
+ subPath: ca.crt
+ {{- end }}
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/gerrit/logs"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ volumes:
+ {{ if not .Values.gerritReplica.persistence.enabled -}}
+ - name: gerrit-site
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.gerritReplica.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-plugin-cache-pvc
+ {{- end }}
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ - name: gerrit-init-config
+ configMap:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ - name: gerrit-config
+ configMap:
+ name: {{ .Release.Name }}-gerrit-replica-configmap
+ - name: gerrit-replica-secure-config
+ secret:
+ secretName: {{ .Release.Name }}-gerrit-replica-secure-config
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ secret:
+ secretName: {{ .Release.Name }}-tls-ca
+ {{- end }}
+ {{- range .Values.gerritReplica.additionalConfigMaps }}
+ - name: {{ .name }}
+ configMap:
+ name: {{ if .data }}{{ $root.Release.Name }}-{{ .name }}{{ else }}{{ .name }}{{ end }}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
+ {{ if .Values.promtailSidecar.enabled -}}
+ - name: promtail-config
+ configMap:
+ name: {{ .Release.Name }}-promtail-gerrit-configmap
+ - name: promtail-secret
+ secret:
+ secretName: {{ .Release.Name }}-promtail-secret
+ {{- end }}
+ {{ if .Values.gerritReplica.persistence.enabled -}}
+ volumeClaimTemplates:
+ - metadata:
+ name: gerrit-site
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.gerritReplica.persistence.size }}
+ storageClassName: {{ .Values.storageClasses.default.name }}
+ {{- end }}
diff --git a/charts/gerrit-replica/templates/gerrit-replica.storage.yaml b/charts/gerrit-replica/templates/gerrit-replica.storage.yaml
new file mode 100644
index 0000000..c710737
--- /dev/null
+++ b/charts/gerrit-replica/templates/gerrit-replica.storage.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.gerritReplica.pluginManagement.cache.enabled }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-plugin-cache-pvc
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gerritReplica.pluginManagement.cache.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/gerrit-replica/templates/git-backend.deployment.yaml b/charts/gerrit-replica/templates/git-backend.deployment.yaml
new file mode 100644
index 0000000..037bcb9
--- /dev/null
+++ b/charts/gerrit-replica/templates/git-backend.deployment.yaml
@@ -0,0 +1,168 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Release.Name }}-git-backend-deployment
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.gitBackend.replicas }}
+ strategy:
+ rollingUpdate:
+ maxSurge: {{ .Values.gitBackend.maxSurge }}
+ maxUnavailable: {{ .Values.gitBackend.maxUnavailable }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ {{- if .Values.gitBackend.additionalPodLabels }}
+{{ toYaml .Values.gitBackend.additionalPodLabels | indent 8 }}
+ {{- end }}
+ annotations:
+ chartRevision: "{{ .Release.Revision }}"
+ spec:
+ {{- with .Values.gitBackend.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gitBackend.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gitBackend.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gitBackend.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: apache-git-http-backend
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ template "registry" . }}{{ .Values.gitBackend.image }}:{{ .Values.images.version }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ ports:
+ - name: http-port
+ containerPort: 80
+ resources:
+{{ toYaml .Values.gitBackend.resources | indent 10 }}
+ livenessProbe:
+ tcpSocket:
+ port: http-port
+{{ toYaml .Values.gitBackend.livenessProbe | indent 10 }}
+ readinessProbe:
+ tcpSocket:
+ port: http-port
+{{ toYaml .Values.gitBackend.readinessProbe | indent 10 }}
+ volumeMounts:
+ - name: git-repositories
+ mountPath: "/var/gerrit/git"
+ - name: logs
+ subPathExpr: "apache-git-http-backend/$(POD_NAME)"
+ mountPath: "/var/log/apache2"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ - name: git-backend-secret
+ readOnly: true
+ subPath: .htpasswd
+ mountPath: "/var/apache/credentials/.htpasswd"
+ {{ if .Values.istio.enabled -}}
+ - name: istio-proxy
+ image: auto
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l | xargs) -ne 0 ]; do sleep 1; done"
+ {{- end }}
+ volumes:
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: git-backend-secret
+ secret:
+ secretName: {{ .Release.Name }}-git-backend-secret
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
diff --git a/charts/gerrit-replica/templates/git-backend.secrets.yaml b/charts/gerrit-replica/templates/git-backend.secrets.yaml
new file mode 100644
index 0000000..94b1705
--- /dev/null
+++ b/charts/gerrit-replica/templates/git-backend.secrets.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-git-backend-secret
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ .htpasswd: {{ required "A .htpasswd-file is required for the git backend." .Values.gitBackend.credentials.htpasswd | b64enc }}
+type: Opaque
diff --git a/charts/gerrit-replica/templates/git-backend.service.yaml b/charts/gerrit-replica/templates/git-backend.service.yaml
new file mode 100644
index 0000000..7bd47ef
--- /dev/null
+++ b/charts/gerrit-replica/templates/git-backend.service.yaml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Release.Name }}-git-backend-service
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.gitBackend.service.additionalAnnotations }}
+ annotations:
+{{ toYaml .Values.gitBackend.service.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ with .Values.gitBackend.service }}
+ {{- if .loadBalancerSourceRanges -}}
+ loadBalancerSourceRanges:
+{{- range .loadBalancerSourceRanges }}
+ - {{ . | quote }}
+{{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .http.port }}
+ targetPort: 80
+ type: {{ .type }}
+ externalTrafficPolicy: {{ .externalTrafficPolicy }}
+ {{- end }}
+ selector:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/gerrit-replica/templates/git-gc.cronjob.yaml b/charts/gerrit-replica/templates/git-gc.cronjob.yaml
new file mode 100644
index 0000000..028ffe9
--- /dev/null
+++ b/charts/gerrit-replica/templates/git-gc.cronjob.yaml
@@ -0,0 +1,134 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-git-gc
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.gitGC.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ annotations:
+ cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
+ {{ if .Values.istio.enabled }}
+ sidecar.istio.io/inject: "false"
+ {{- end }}
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.gitGC.additionalPodLabels }}
+{{ toYaml .Values.gitGC.additionalPodLabels | indent 12 }}
+ {{- end }}
+ spec:
+ {{- with .Values.gitGC.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.gitGC.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.gitGC.affinity }}
+ affinity:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ restartPolicy: OnFailure
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: git-gc
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ template "registry" . }}{{ .Values.gitGC.image }}:{{ .Values.images.version }}
+ resources:
+{{ toYaml .Values.gitGC.resources | indent 14 }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: git-repositories
+ mountPath: "/var/gerrit/git"
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/log/git"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ volumes:
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
diff --git a/charts/gerrit-replica/templates/global.secrets.yaml b/charts/gerrit-replica/templates/global.secrets.yaml
new file mode 100644
index 0000000..7dfe4a1
--- /dev/null
+++ b/charts/gerrit-replica/templates/global.secrets.yaml
@@ -0,0 +1,18 @@
+{{ if .Values.caCert -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-tls-ca
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ ca.crt: {{ .Values.caCert | b64enc }}
+type: Opaque
+{{- end }}
diff --git a/charts/gerrit-replica/templates/image-pull.secret.yaml b/charts/gerrit-replica/templates/image-pull.secret.yaml
new file mode 100644
index 0000000..3f97cd0
--- /dev/null
+++ b/charts/gerrit-replica/templates/image-pull.secret.yaml
@@ -0,0 +1,13 @@
+{{ if and .Values.images.registry.ImagePullSecret.name .Values.images.registry.ImagePullSecret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.images.registry.ImagePullSecret.name }}
+ labels:
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ template "imagePullSecret" . }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/gerrit-replica/templates/ingress.yaml b/charts/gerrit-replica/templates/ingress.yaml
new file mode 100644
index 0000000..e78dfcc
--- /dev/null
+++ b/charts/gerrit-replica/templates/ingress.yaml
@@ -0,0 +1,86 @@
+{{ if and .Values.ingress.enabled (not .Values.istio.enabled) -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ .Release.Name }}-ingress
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/proxy-body-size: {{ .Values.ingress.maxBodySize | default "50m" }}
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ nginx.ingress.kubernetes.io/configuration-snippet: |-
+ if ($args ~ service=git-receive-pack){
+ set $proxy_upstream_name "{{ .Release.Namespace }}-{{ .Release.Name }}-git-backend-service-http";
+ set $proxy_host $proxy_upstream_name;
+ set $service_name "{{ .Release.Name }}-git-backend-service";
+ }
+ {{- if .Values.ingress.additionalAnnotations }}
+{{ toYaml .Values.ingress.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ if .Values.ingress.tls.enabled -}}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.host }}
+ {{ if .Values.ingress.tls.secret.create -}}
+ secretName: {{ .Release.Name }}-tls-secret
+ {{- else }}
+ secretName: {{ .Values.ingress.tls.secret.name }}
+ {{- end }}
+ {{- end }}
+ rules:
+ - host: {{required "A host URL is required for the ingress. Please set 'ingress.host'" .Values.ingress.host }}
+ http:
+ paths:
+ - pathType: Prefix
+ path: /a/projects
+ backend:
+ service:
+ name: {{ .Release.Name }}-git-backend-service
+ port:
+ number: {{ .Values.gitBackend.service.http.port }}
+ - pathType: Prefix
+ path: "/.*/git-receive-pack"
+ backend:
+ service:
+ name: {{ .Release.Name }}-git-backend-service
+ port:
+ number: {{ .Values.gitBackend.service.http.port }}
+ - pathType: Prefix
+ path: /
+ backend:
+ service:
+ name: {{ .Release.Name }}-gerrit-replica-service
+ port:
+ number: {{ .Values.gerritReplica.service.http.port }}
+{{- end }}
+---
+{{ if and (and .Values.ingress.tls.enabled .Values.ingress.tls.secret.create) (not .Values.istio.enabled) -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-tls-secret
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ {{ with .Values.ingress.tls -}}
+ tls.crt: {{ .cert | b64enc }}
+ tls.key: {{ .key | b64enc }}
+ {{- end }}
+{{- end }}
diff --git a/charts/gerrit-replica/templates/istio.ingressgateway.yaml b/charts/gerrit-replica/templates/istio.ingressgateway.yaml
new file mode 100644
index 0000000..5938536
--- /dev/null
+++ b/charts/gerrit-replica/templates/istio.ingressgateway.yaml
@@ -0,0 +1,144 @@
+{{ if .Values.istio.enabled -}}
+{{ if and .Values.istio.tls.enabled .Values.istio.tls.secret.create }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-istio-tls-secret
+ namespace: istio-system
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ {{ with .Values.istio.tls -}}
+ tls.crt: {{ .cert | b64enc }}
+ tls.key: {{ .key | b64enc }}
+ {{- end }}
+{{- end }}
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+ name: {{ .Release.Name }}-istio-gateway
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - {{ .Values.istio.host }}
+ {{ if .Values.istio.tls.enabled }}
+ tls:
+ httpsRedirect: true
+ - port:
+ number: 443
+ name: https
+ protocol: HTTPS
+ hosts:
+ - {{ .Values.istio.host }}
+ tls:
+ mode: SIMPLE
+ {{ if .Values.istio.tls.secret.create }}
+ credentialName: {{ .Release.Name }}-istio-tls-secret
+ {{- else }}
+ credentialName: {{ .Values.istio.tls.secret.name }}
+ {{- end }}
+ {{- end }}
+ {{ if .Values.istio.ssh.enabled }}
+ - port:
+ number: 29418
+ name: ssh
+ protocol: TCP
+ hosts:
+ - {{ .Values.istio.host }}
+ {{- end }}
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: {{ .Release.Name }}-istio-virtual-service
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ hosts:
+ - {{ .Values.istio.host }}
+ gateways:
+ - {{ .Release.Name }}-istio-gateway
+ http:
+ - name: apache-git-http-backend
+ match:
+ - uri:
+ prefix: "/a/projects/"
+ - uri:
+ regex: "/.*/git-receive-pack"
+ - uri:
+ regex: "/.*/info/refs"
+ queryParams:
+ service:
+ exact: git-receive-pack
+ route:
+ - destination:
+ host: {{ .Release.Name }}-git-backend-service.{{ .Release.Namespace }}.svc.cluster.local
+ port:
+ number: 80
+ - name: gerrit-replica
+ route:
+ - destination:
+ host: {{ .Release.Name }}-gerrit-replica-service.{{ .Release.Namespace }}.svc.cluster.local
+ port:
+ number: 80
+ {{ if .Values.istio.ssh.enabled }}
+ tcp:
+ - match:
+ - port: {{ .Values.gerritReplica.service.ssh.port }}
+ route:
+ - destination:
+ host: {{ .Release.Name }}-gerrit-replica-service.{{ .Release.Namespace }}.svc.cluster.local
+ port:
+ number: {{ .Values.gerritReplica.service.ssh.port }}
+ {{- end }}
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: DestinationRule
+metadata:
+ name: {{ .Release.Name }}-gerrit-destination-rule
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ host: {{ .Release.Name }}-gerrit-replica-service.{{ .Release.Namespace }}.svc.cluster.local
+ trafficPolicy:
+ loadBalancer:
+ simple: LEAST_CONN
+{{- end }}
diff --git a/charts/gerrit-replica/templates/log-cleaner.cronjob.yaml b/charts/gerrit-replica/templates/log-cleaner.cronjob.yaml
new file mode 100644
index 0000000..cbeb88f
--- /dev/null
+++ b/charts/gerrit-replica/templates/log-cleaner.cronjob.yaml
@@ -0,0 +1,69 @@
+{{- if and .Values.logStorage.enabled .Values.logStorage.cleanup.enabled }}
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-log-cleaner
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.logStorage.cleanup.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.logStorage.cleanup.additionalPodLabels }}
+{{ toYaml .Values.logStorage.cleanup.additionalPodLabels | indent 12 }}
+ {{- end }}
+ {{ if .Values.istio.enabled -}}
+ annotations:
+ sidecar.istio.io/inject: "false"
+ {{- end }}
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: log-cleaner
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ find /var/logs/ \
+ -mindepth 1 \
+ -type f \
+ -mtime +{{ .Values.logStorage.cleanup.retentionDays }} \
+ -print \
+ -delete
+ find /var/logs/ -type d -empty -delete
+ resources:
+{{ toYaml .Values.logStorage.cleanup.resources | indent 14 }}
+ volumeMounts:
+ - name: logs
+ mountPath: "/var/logs"
+ volumes:
+ - name: logs
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+{{- end }}
diff --git a/charts/gerrit-replica/templates/netpol.yaml b/charts/gerrit-replica/templates/netpol.yaml
new file mode 100644
index 0000000..72a2bbd
--- /dev/null
+++ b/charts/gerrit-replica/templates/netpol.yaml
@@ -0,0 +1,248 @@
+{{ if .Values.networkPolicies.enabled -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: {{ .Release.Name }}-default-deny-all
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress: []
+ egress: []
+---
+{{ if .Values.networkPolicies.dnsPorts -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ .Release.Name }}-allow-dns-access
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ {{ range .Values.networkPolicies.dnsPorts -}}
+ - port: {{ . }}
+ protocol: UDP
+ - port: {{ . }}
+ protocol: TCP
+ {{ end }}
+{{- end }}
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-replica-allow-external
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+ - ports:
+ - port: 8080
+ from: []
+---
+{{ if or .Values.gitBackend.networkPolicy.ingress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: git-backend-custom-ingress-policies
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+{{ toYaml .Values.gitBackend.networkPolicy.ingress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.gitBackend.networkPolicy.egress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: git-backend-custom-egress-policies
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ egress:
+{{ toYaml .Values.gitBackend.networkPolicy.egress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.gerritReplica.networkPolicy.ingress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-replica-custom-ingress-policies
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+{{ toYaml .Values.gerritReplica.networkPolicy.ingress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.gerritReplica.networkPolicy.egress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-replica-custom-egress-policies
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ egress:
+{{ toYaml .Values.gerritReplica.networkPolicy.egress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.istio.enabled -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: istio-proxy
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Egress
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ egress:
+ - ports:
+ - protocol: TCP
+ port: 15012
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 15012
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ .Release.Name }}-istio-ingress
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 80
+ {{ if .Values.istio.ssh.enabled }}
+ - protocol: TCP
+ port: {{ .Values.gerritReplica.service.ssh.port }}
+ {{- end }}
+ from:
+ - namespaceSelector:
+ matchLabels:
+ name: istio-system
+ - podSelector:
+ matchLabels:
+ istio: ingressgateway
+
+{{- end }}
+{{- end }}
diff --git a/charts/gerrit-replica/templates/nfs.configmap.yaml b/charts/gerrit-replica/templates/nfs.configmap.yaml
new file mode 100644
index 0000000..32b167b
--- /dev/null
+++ b/charts/gerrit-replica/templates/nfs.configmap.yaml
@@ -0,0 +1,28 @@
+{{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-nfs-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ idmapd.conf: |-
+ [General]
+
+ Verbosity = 0
+ Pipefs-Directory = /run/rpc_pipefs
+ # set your own domain here, if it differs from FQDN minus hostname
+ Domain = {{ .Values.nfsWorkaround.idDomain }}
+
+ [Mapping]
+
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+{{- end }}
diff --git a/charts/gerrit-replica/templates/promtail.configmap.yaml b/charts/gerrit-replica/templates/promtail.configmap.yaml
new file mode 100644
index 0000000..8dac380
--- /dev/null
+++ b/charts/gerrit-replica/templates/promtail.configmap.yaml
@@ -0,0 +1,94 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-promtail-gerrit-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ promtail.yaml: |-
+ positions:
+ filename: /var/gerrit/logs/promtail-positions.yaml
+
+ client:
+ tls_config:
+ insecure_skip_verify: {{ .Values.promtailSidecar.tls.skipVerify }}
+ {{- if not .Values.promtailSidecar.tls.skipVerify }}
+ ca_file: /etc/promtail/promtail.ca.crt
+ {{- end }}
+ basic_auth:
+ username: {{ .Values.promtailSidecar.loki.user }}
+ password_file: /etc/promtail/promtail.secret
+ scrape_configs:
+ - job_name: gerrit_error
+ static_configs:
+ - targets:
+ - localhost
+ labels:
+ job: gerrit_error
+ __path__: /var/gerrit/logs/error_log.json
+ entry_parser: raw
+ pipeline_stages:
+ - json:
+ expressions:
+ timestamp: '"@timestamp"'
+ message:
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "," "." 1 }}'`}}
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "Z" " +0000" 1 }}'`}}
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "T" " " 1 }}'`}}
+ - timestamp:
+ source: timestamp
+ format: "2006-01-02 15:04:05.999 -0700"
+ - regex:
+ source: message
+ expression: "Gerrit Code Review (?P<gerrit_version>.*) ready"
+ - labels:
+ gerrit_version:
+ - job_name: gerrit_httpd
+ static_configs:
+ - targets:
+ - localhost
+ labels:
+ job: gerrit_httpd
+ __path__: /var/gerrit/logs/httpd_log.json
+ entry_parser: raw
+ pipeline_stages:
+ - json:
+ expressions:
+ timestamp: null
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "," "." 1 }}'`}}
+ - timestamp:
+ format: 02/Jan/2006:15:04:05.999 -0700
+ source: timestamp
+ - job_name: gerrit_sshd
+ static_configs:
+ - targets:
+ - localhost
+ labels:
+ job: gerrit_sshd
+ __path__: /var/gerrit/logs/sshd_log.json
+ entry_parser: raw
+ pipeline_stages:
+ - json:
+ expressions:
+ timestamp:
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "," "." 1 }}'`}}
+ - timestamp:
+ source: timestamp
+ format: 2006-01-02 15:04:05.999 -0700
diff --git a/charts/gerrit-replica/templates/promtail.secret.yaml b/charts/gerrit-replica/templates/promtail.secret.yaml
new file mode 100644
index 0000000..012fb5b
--- /dev/null
+++ b/charts/gerrit-replica/templates/promtail.secret.yaml
@@ -0,0 +1,18 @@
+{{ if .Values.promtailSidecar.enabled -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-promtail-secret
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: Opaque
+data:
+ promtail.secret: {{ .Values.promtailSidecar.loki.password | b64enc }}
+{{- end }}
diff --git a/charts/gerrit-replica/templates/storage.pvc.yaml b/charts/gerrit-replica/templates/storage.pvc.yaml
new file mode 100644
index 0000000..5f8974e
--- /dev/null
+++ b/charts/gerrit-replica/templates/storage.pvc.yaml
@@ -0,0 +1,27 @@
+{{- if not .Values.gitRepositoryStorage.externalPVC.use }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-git-repositories-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gitRepositoryStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
+{{- if and .Values.logStorage.enabled (not .Values.logStorage.externalPVC.use) }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-log-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.logStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/gerrit-replica/templates/storageclasses.yaml b/charts/gerrit-replica/templates/storageclasses.yaml
new file mode 100644
index 0000000..fb91856
--- /dev/null
+++ b/charts/gerrit-replica/templates/storageclasses.yaml
@@ -0,0 +1,57 @@
+{{ if .Values.storageClasses.default.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.default.name }}
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.default.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.default.reclaimPolicy }}
+{{ if .Values.storageClasses.default.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.default.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+{{ if .Values.storageClasses.default.mountOptions -}}
+mountOptions:
+{{- range .Values.storageClasses.default.mountOptions }}
+ - {{ . }}
+{{- end }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.default.allowVolumeExpansion }}
+{{- end }}
+{{- end }}
+---
+{{ if .Values.storageClasses.shared.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.shared.name }}
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.shared.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.shared.reclaimPolicy }}
+{{ if .Values.storageClasses.shared.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.shared.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+{{ if .Values.storageClasses.shared.mountOptions -}}
+mountOptions:
+{{- range .Values.storageClasses.shared.mountOptions }}
+ - {{ . }}
+{{- end }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.shared.allowVolumeExpansion }}
+{{- end }}
+{{- end }}
diff --git a/charts/gerrit-replica/values.yaml b/charts/gerrit-replica/values.yaml
new file mode 100644
index 0000000..f2ba93c
--- /dev/null
+++ b/charts/gerrit-replica/values.yaml
@@ -0,0 +1,433 @@
+images:
+ busybox:
+ registry: docker.io
+ tag: latest
+ # Registry used for container images created by this project
+ registry:
+ # The registry name must NOT contain a trailing slash
+ name:
+ ImagePullSecret:
+ # Leave blank, if no ImagePullSecret is needed.
+ name: image-pull-secret
+ # If set to false, the gerrit-replica chart expects either a ImagePullSecret
+ # with the name configured above to be present on the cluster or that no
+ # credentials are needed.
+ create: false
+ username:
+ password:
+ version: latest
+ imagePullPolicy: Always
+ # Additional ImagePullSecrets that already exist and should be used by the
+ # pods of this chart. E.g. to pull busybox from dockerhub.
+ additionalImagePullSecrets: []
+
+# Additional labels that should be applied to all resources
+additionalLabels: {}
+
+storageClasses:
+ # Storage class used for storing logs and other pod-specific persisted data
+ default:
+ # If create is set to false, an existing StorageClass with the given
+ # name is expected to exist in the cluster. Setting create to true will
+ # create a storage class with the parameters given below.
+ name: default
+ create: false
+ provisioner: kubernetes.io/aws-ebs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ type: gp2
+ fsType: ext4
+ mountOptions: []
+ allowVolumeExpansion: false
+ # Storage class used for storing git repositories. Has to provide RWM access.
+ shared:
+ # If create is set to false, an existing StorageClass with RWM access
+ # mode and the given name has to be provided.
+ name: shared-storage
+ create: false
+ provisioner: nfs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ mountOptions: vers=4.1
+ mountOptions: []
+ allowVolumeExpansion: false
+
+nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idDomain: localdomain.com
+
+
+networkPolicies:
+ enabled: false
+ dnsPorts:
+ - 53
+ - 8053
+
+
+gitRepositoryStorage:
+ externalPVC:
+ use: false
+ name: git-repositories-pvc
+ size: 5Gi
+
+
+logStorage:
+ enabled: false
+ externalPVC:
+ use: false
+ name: gerrit-logs-pvc
+ size: 5Gi
+ cleanup:
+ enabled: false
+ additionalPodLabels: {}
+ schedule: "0 0 * * *"
+ retentionDays: 14
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+
+istio:
+ enabled: false
+ host:
+ tls:
+ enabled: false
+ secret:
+ # If using an external secret, make sure to name the keys `tls.crt`
+ # and `tls.key`, respectively.
+ create: true
+ # `name` will only be used, if `create` is set to false to bind an
+ # existing secret. Otherwise the name will be automatically generated to
+ # avoid conflicts between multiple chart installations.
+ name:
+ # `cert` and `key` will only be used, if the secret will be created by
+ # this chart.
+ cert: |-
+ -----BEGIN CERTIFICATE-----
+
+ -----END CERTIFICATE-----
+ key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+
+ -----END RSA PRIVATE KEY-----
+ ssh:
+ enabled: false
+
+caCert:
+
+ingress:
+ enabled: false
+ host:
+ # The maximum body size to allow for requests. Use "0" to allow unlimited
+ # request body sizes.
+ maxBodySize: 50m
+ additionalAnnotations:
+ kubernetes.io/ingress.class: nginx
+ # nginx.ingress.kubernetes.io/server-alias: example.com
+ # nginx.ingress.kubernetes.io/whitelist-source-range: xxx.xxx.xxx.xxx
+ tls:
+ enabled: false
+ secret:
+ # If using an external secret, make sure to name the keys `tls.crt`
+ # and `tls.key`, respectively.
+ create: true
+ # `name` will only be used, if `create` is set to false to bind an
+ # existing secret. Otherwise the name will be automatically generated to
+ # avoid conflicts between multiple chart installations.
+ name:
+ # `cert` and `key` will only be used, if the secret will be created by
+ # this chart.
+ cert: |-
+ -----BEGIN CERTIFICATE-----
+
+ -----END CERTIFICATE-----
+ key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+
+ -----END RSA PRIVATE KEY-----
+
+promtailSidecar:
+ enabled: false
+ image: grafana/promtail
+ version: 1.3.0
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 200m
+ memory: 128Mi
+ tls:
+ skipVerify: true
+ loki:
+ url: loki.example.com
+ user: admin
+ password: secret
+
+
+gitBackend:
+ image: k8sgerrit/apache-git-http-backend
+
+ additionalPodLabels: {}
+ tolerations: []
+ topologySpreadConstraints: {}
+ nodeSelector: {}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - git-backend
+ topologyKey: "topology.kubernetes.io/zone"
+
+ replicas: 1
+ maxSurge: 25%
+ # For just one replica, 100 % unavailability has to be allowed for updates to
+ # work.
+ maxUnavailable: 100%
+
+ # The general NetworkPolicy rules implemented by this chart may be too restrictive
+ # for some setups. Here custom rules may be added to whitelist some additional
+ # connections.
+ networkPolicy:
+ # This allows ingress traffic from all sources. If possible, this should be
+ # limited to the respective primary Gerrit that replicates to this replica.
+ ingress:
+ - {}
+ egress: []
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+ livenessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 5
+
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 1
+
+ service:
+ additionalAnnotations: {}
+ loadBalancerSourceRanges: []
+ type: NodePort
+ externalTrafficPolicy: Cluster
+ http:
+ port: 80
+
+ credentials:
+ # example: user: 'git'; password: 'secret'
+ # run `man htpasswd` to learn about how to create .htpasswd-files
+ htpasswd: git:$apr1$O/LbLKC7$Q60GWE7OcqSEMSfe/K8xU.
+ # TODO: Create htpasswd-file on container startup instead and set user
+ # and password in values.yaml.
+ #user:
+ #password:
+
+
+gitGC:
+ image: k8sgerrit/git-gc
+
+ tolerations: []
+ nodeSelector: {}
+ affinity: {}
+ additionalPodLabels: {}
+
+ schedule: 0 6,18 * * *
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+gerritReplica:
+ images:
+ gerritInit: k8sgerrit/gerrit-init
+ gerritReplica: k8sgerrit/gerrit
+
+ tolerations: []
+ topologySpreadConstraints: {}
+ nodeSelector: {}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - gerrit-replica
+ topologyKey: "topology.kubernetes.io/zone"
+
+ replicas: 1
+ updatePartition: 0
+ additionalAnnotations: {}
+ additionalPodLabels: {}
+
+ # If no value for probeScheme, the probe will use the default HTTP
+ probeScheme: HTTP
+
+ livenessProbe:
+ initialDelaySeconds: 60
+ periodSeconds: 5
+
+ readinessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+
+ startupProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+ gracefulStopTimeout: 90
+
+ # The memory limit has to be higher than the configured heap-size for Java!
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ persistence:
+ enabled: true
+ size: 5Gi
+
+ # The general NetworkPolicy rules implemented by this chart may be too restrictive
+ # for some setups, e.g. when trying to connect to an external database. Here
+ # custom rules may be added to whitelist some additional connections.
+ networkPolicy:
+ ingress: []
+ egress: []
+
+ service:
+ additionalAnnotations: {}
+ loadBalancerSourceRanges: []
+ type: NodePort
+ externalTrafficPolicy: Cluster
+ http:
+ port: 80
+ ssh:
+ enabled: false
+ port: 29418
+
+ # `gerritReplica.keystore` expects a base64-encoded Java-keystore
+ # Since Java keystores are binary files, adding the unencoded content and
+ # automatic encoding using helm does not work here.
+ keystore:
+
+ pluginManagement:
+ plugins: []
+ # A plugin packaged in the gerrit.war-file
+ # - name: download-commands
+
+ # A plugin packaged in the gerrit.war-file that will also be installed as a
+ # lib
+ # - name: replication
+ # installAsLibrary: true
+
+ # A plugin that will be downloaded on startup
+ # - name: delete-project
+ # url: https://example.com/gerrit-plugins/delete-project.jar
+ # sha1:
+ # installAsLibrary: false
+
+ # Only downloaded plugins will be cached. This will be ignored, if no plugins
+ # are downloaded.
+ libs: []
+ cache:
+ enabled: false
+ size: 1Gi
+
+ priorityClassName:
+
+ etc:
+ # Some values are expected to have a specific value for the deployment installed
+ # by this chart to work. These are marked with `# FIXED`.
+ # Do not change them!
+ config:
+ gerrit.config: |-
+ [gerrit]
+ basePath = git # FIXED
+ serverId = gerrit-replica-1
+ # The canonical web URL has to be set to the Ingress host, if an Ingress
+ # is used. If a LoadBalancer-service is used, this should be set to the
+ # LoadBalancer's external IP. This can only be done manually after installing
+ # the chart, when you know the external IP the LoadBalancer got from the
+ # cluster.
+ canonicalWebUrl = http://example.com/
+ disableReverseDnsLookup = true
+ [index]
+ type = LUCENE
+ [index "scheduledIndexer"]
+ runOnStartup = false
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ # If using an ingress use proxy-http or proxy-https
+ listenUrl = proxy-http://*:8080/
+ requestLog = true
+ gracefulStopTimeout = 1m
+ [sshd]
+ listenAddress = *:29418
+ gracefulStopTimeout = 1m
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [cache]
+ directory = cache
+ [container]
+ user = gerrit # FIXED
+ replica = true # FIXED
+ javaHome = /usr/lib/jvm/java-17-openjdk # FIXED
+ javaOptions = -Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore # FIXED
+ javaOptions = -Xms200m
+ # Has to be lower than 'gerritReplica.resources.limits.memory'. Also
+ # consider memories used by other applications in the container.
+ javaOptions = -Xmx4g
+
+ secret:
+ secure.config: |-
+ # Password for the keystore added as value for 'gerritReplica.keystore'
+ # Only needed, if SSL is enabled.
+ #[httpd]
+ # sslKeyPassword = gerrit
+
+ # ssh_host_ecdsa_key: |-
+ # -----BEGIN EC PRIVATE KEY-----
+
+ # -----END EC PRIVATE KEY-----
+
+ # ssh_host_ecdsa_key.pub: ecdsa-sha2-nistp256...
+
+ additionalConfigMaps:
+ # - name:
+ # subDir:
+ # data:
+ # file.txt: test
diff --git a/charts/gerrit/.helmignore b/charts/gerrit/.helmignore
new file mode 100644
index 0000000..4f4562f
--- /dev/null
+++ b/charts/gerrit/.helmignore
@@ -0,0 +1,24 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+docs/
+supplements/
diff --git a/charts/gerrit/Chart.yaml b/charts/gerrit/Chart.yaml
new file mode 100644
index 0000000..cafaa43
--- /dev/null
+++ b/charts/gerrit/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v2
+appVersion: 3.9.1
+description: |-
+ Gerrit is a free, web-based team code collaboration tool. Software developers
+ in a team can review each other's modifications on their source code using
+ a Web browser and approve or reject those changes. It integrates closely with
+ Git, a distributed version control system. [1]
+
+ [1](https://en.wikipedia.org/wiki/Gerrit_(software))
+name: gerrit
+version: 0.2.0
+maintainers:
+- name: Thomas Draebing
+ email: thomas.draebing@sap.com
+- name: Matthias Sohn
+ email: matthias.sohn@sap.com
+- name: Sasa Zivkov
+ email: sasa.zivkov@sap.com
+- name: Christian Halstrick
+ email: christian.halstrick@sap.com
+home: https://gerrit.googlesource.com/k8s-gerrit/+/master/helm-charts/gerrit-replica
+icon: http://commondatastorage.googleapis.com/gerrit-static/diffy-w200.png
+sources:
+- https://gerrit.googlesource.com/k8s-gerrit/+/master/
+keywords:
+- gerrit
+- git
diff --git a/charts/gerrit/LICENSE b/charts/gerrit/LICENSE
new file mode 100644
index 0000000..028fc9f
--- /dev/null
+++ b/charts/gerrit/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/charts/gerrit/README.md b/charts/gerrit/README.md
new file mode 100644
index 0000000..8300e0a
--- /dev/null
+++ b/charts/gerrit/README.md
@@ -0,0 +1,460 @@
+# Gerrit on Kubernetes
+
+Gerrit is a web-based code review tool, which acts as a Git server. This helm
+chart provides a Gerrit setup that can be deployed on Kubernetes.
+In addition, the chart provides a CronJob to perform Git garbage collection.
+
+***note
+Gerrit versions before 3.0 are no longer supported, since the support of ReviewDB
+was removed.
+***
+
+## Prerequisites
+
+- Helm (>= version 3.0)
+
+ (Check out [this guide](https://docs.helm.sh/using_helm/#quickstart-guide)
+ on how to install and use helm.)
+
+- Access to a provisioner for persistent volumes with `Read Write Many (RWM)`-
+ capability.
+
+ A list of applicable volume types can be found
+ [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).
+ This project was developed using the
+ [NFS-server-provisioner helm chart](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner),
+ a NFS-provisioner deployed in the Kubernetes cluster itself. Refer to
+ [this guide](/helm-charts/gerrit/docs/nfs-provisioner.md) of how to
+ deploy it in context of this project.
+
+- A domain name that is configured to point to the IP address of the node running
+ the Ingress controller on the kubernetes cluster (as described
+ [here](http://alesnosek.com/blog/2017/02/14/accessing-kubernetes-pods-from-outside-of-the-cluster/)).
+
+- (Optional: Required, if SSL is configured)
+ A [Java keystore](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html#httpd.sslKeyStore)
+ to be used by Gerrit.
+
+## Installing the Chart
+
+***note
+**ATTENTION:** The value for `ingress.host` is required for rendering
+the chart's templates. The nature of the value does not allow defaults.
+Thus a custom `values.yaml`-file setting this value is required!
+***
+
+To install the chart with the release name `gerrit`, execute:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit \ # release name
+ ./gerrit \ # path to chart
+ -f <path-to-custom-values>.yaml
+```
+
+The command deploys the Gerrit instance on the current Kubernetes cluster.
+The [configuration section](#Configuration) lists the parameters that can be
+configured during installation.
+
+## Configuration
+
+The following sections list the configurable values in `values.yaml`. To configure
+a Gerrit setup, make a copy of the `values.yaml`-file and change the parameters
+as needed. The configuration can be applied by installing the chart as described
+[above](#Installing-the-chart).
+
+In addition, single options can be set without creating a custom `values.yaml`:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit \ # release name
+ ./gerrit \ # path to chart
+ --set=gitRepositoryStorage.size=100Gi
+```
+
+### Container images
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `images.busybox.registry` | The registry to pull the busybox container images from | `docker.io` |
+| `images.busybox.tag` | The busybox image tag to use | `latest` |
+| `images.registry.name` | The image registry to pull the container images from | `` |
+| `images.registry.ImagePullSecret.name` | Name of the ImagePullSecret | `image-pull-secret` (if empty no image pull secret will be deployed) |
+| `images.registry.ImagePullSecret.create` | Whether to create an ImagePullSecret | `false` |
+| `images.registry.ImagePullSecret.username` | The image registry username | `nil` |
+| `images.registry.ImagePullSecret.password` | The image registry password | `nil` |
+| `images.version` | The image version (image tag) to use | `latest` |
+| `images.imagePullPolicy` | Image pull policy | `Always` |
+| `images.additionalImagePullSecrets` | Additional image pull policies that pods should use | `[]` |
+
+### Labels
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `additionalLabels` | Additional labels for resources managed by this Helm chart | `{}` |
+
+### Storage classes
+
+For information of how a `StorageClass` is configured in Kubernetes, read the
+[official Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#introduction).
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `storageClasses.default.name` | The name of the default StorageClass (RWO) | `default` |
+| `storageClasses.default.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.default.provisioner` | Provisioner of the StorageClass | `kubernetes.io/aws-ebs` |
+| `storageClasses.default.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.default.parameters` | Parameters for the provisioner | `parameters.type: gp2`, `parameters.fsType: ext4` |
+| `storageClasses.default.mountOptions` | The mount options of the default StorageClass | `[]` |
+| `storageClasses.default.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+| `storageClasses.shared.name` | The name of the shared StorageClass (RWM) | `shared-storage` |
+| `storageClasses.shared.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.shared.provisioner` | Provisioner of the StorageClass | `nfs` |
+| `storageClasses.shared.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.shared.parameters` | Parameters for the provisioner | `parameters.mountOptions: vers=4.1` |
+| `storageClasses.shared.mountOptions` | The mount options of the shared StorageClass | `[]` |
+| `storageClasses.shared.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+
+### Network policies
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `networkPolicies.enabled` | Whether to enable preconfigured NetworkPolicies | `false` |
+| `networkPolicies.dnsPorts` | List of ports used by DNS-service (e.g. KubeDNS) | `[53, 8053]` |
+
+The NetworkPolicies provided here are quite strict and do not account for all
+possible scenarios. Thus, custom NetworkPolicies have to be added, e.g. for
+allowing Gerrit to replicate to a Gerrit replica. By default, the egress traffic
+of the gerrit pod is blocked, except for connections to the DNS-server.
+Thus, replication which requires Gerrit to perform git pushes to the replica will
+not work. The chart provides the possibility to define custom rules for egress-
+traffic of the gerrit pod under `gerrit.networkPolicy.egress`.
+Depending on the scenario, there are different ways to allow the required
+connections. The easiest way is to allow all egress-traffic for the gerrit
+pods:
+
+```yaml
+gerrit:
+ networkPolicy:
+ egress:
+ - {}
+```
+
+If the remote that is replicated to is running in a pod on the same cluster and
+the service-DNS is used as the remote's URL (e.g. http://gerrit-replica-git-backend-service:80/git/${name}.git),
+a podSelector (and namespaceSelector, if the pod is running in a different
+namespace) can be used to whitelist the traffic:
+
+```yaml
+gerrit:
+ networkPolicy:
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: git-backend
+```
+
+If the remote is outside the cluster, the IP of the remote or its load balancer
+can also be whitelisted, e.g.:
+
+```yaml
+gerrit:
+ networkPolicy:
+ egress:
+ - to:
+ - ipBlock:
+ cidr: xxx.xxx.0.0/16
+```
+
+The same principle also applies to other use cases, e.g. connecting to a database.
+For more information about the NetworkPolicy resource refer to the
+[Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
+
+### Workaround for NFS
+
+Kubernetes will not always be able to adapt the ownership of the files within NFS
+volumes. Thus, a workaround exists that will add init-containers to
+adapt file ownership. Note, that only the ownership of the root directory of the
+volume will be changed. All data contained within will be expected to already be
+owned by the user used by Gerrit. Also the ID-domain will be configured to ensure
+correct ID-mapping.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `nfsWorkaround.enabled` | Whether the volume used is an NFS-volume | `false` |
+| `nfsWorkaround.chownOnStartup` | Whether to chown the volume on pod startup | `false` |
+| `nfsWorkaround.idDomain` | The ID-domain that should be used to map user-/group-IDs for the NFS mount | `localdomain.com` |
+
+### Storage for Git repositories
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitRepositoryStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `gitRepositoryStorage.externalPVC.name` | Name of the external PVC | `git-repositories-pvc` |
+| `gitRepositoryStorage.size` | Size of the volume storing the Git repositories | `5Gi` |
+
+If the git repositories should be persisted even if the chart is deleted and in
+a way that the volume containing them can be mounted by the reinstalled chart,
+the PVC claiming the volume has to be created independently of the chart. To use
+the external PVC, set `gitRepositoryStorage.externalPVC.use` to `true` and
+give the name of the PVC under `gitRepositoryStorage.externalPVC.name`.
+
+### Storage for Logs
+
+The logs can be stored in a dedicated persistent volume. This volume has to be a
+read-write-many volume to be able to be used by multiple pods.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `logStorage.enabled` | Whether to enable persistence of logs | `false` |
+| `logStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `logStorage.externalPVC.name` | Name of the external PVC | `gerrit-logs-pvc` |
+| `logStorage.size` | Size of the volume | `5Gi` |
+| `logStorage.cleanup.enabled` | Whether to regularly delete old logs | `false` |
+| `logStorage.cleanup.schedule` | Cron schedule defining when to run the cleanup job | `0 0 * * *` |
+| `logStorage.cleanup.retentionDays` | Number of days to retain the logs | `14` |
+| `logStorage.cleanup.resources` | Resources the container is allowed to use | `requests.cpu: 100m` |
+| `logStorage.cleanup.additionalPodLabels` | Additional labels for pods | `{}` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+
+Each pod will create a separate folder for its logs, allowing to trace logs to
+the respective pods.
+
+### CA certificate
+
+Some application may require TLS verification. If the default CA built into the
+containers is not enough a custom CA certificate can be given to the deployment.
+Note, that Gerrit will require its CA in a JKS keystore, which is described below.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `caCert` | CA certificate for TLS verification (if not set, the default will be used) | `None` |
+
+### Ingress
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `ingress.enabled` | Whether to enable the Ingress | `false` |
+| `ingress.host` | REQUIRED: Host name to use for the Ingress (required for Ingress) | `nil` |
+| `ingress.additionalAnnotations` | Additional annotations for the Ingress | `nil` |
+| `ingress.tls.enabled` | Whether to enable TLS termination in the Ingress | `false` |
+| `ingress.tls.secret.create` | Whether to create a TLS-secret | `true` |
+| `ingress.tls.secret.name` | Name of an external secret that will be used as a TLS-secret | `nil` |
+| `ingress.tls.cert` | Public SSL server certificate | `-----BEGIN CERTIFICATE-----` |
+| `ingress.tls.key` | Private SSL server certificate | `-----BEGIN RSA PRIVATE KEY-----` |
+
+***note
+For graceful shutdown to work with an ingress, the ingress controller has to be
+configured to gracefully close the connections as well.
+***
+
+### Git garbage collection
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitGC.image` | Image name of the Git-GC container image | `k8sgerrit/git-gc` |
+| `gitGC.schedule` | Cron-formatted schedule with which to run Git garbage collection | `0 6,18 * * *` |
+| `gitGC.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `gitGC.logging.persistence.enabled` | Whether to persist logs | `true` |
+| `gitGC.logging.persistence.size` | Storage size for persisted logs | `1Gi` |
+| `gitGC.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gitGC.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.affinity` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.additionalPodLabels` | Additional labels for Pods | `{}` |
+
+### Gerrit
+
+***note
+The way the Jetty servlet used by Gerrit works, the Gerrit component of the
+gerrit chart actually requires the URL to be known, when the chart is installed.
+The suggested way to do that is to use the provided Ingress resource. This requires
+that a URL is available and that the DNS is configured to point the URL to the
+IP of the node the Ingress controller is running on!
+***
+
+***note
+Setting the canonical web URL in the gerrit.config to the host used for the Ingress
+is mandatory, if access to Gerrit is required!
+***
+
+***note
+While the chart allows to configure multiple replica for the Gerrit StatefulSet,
+scaling of Gerrit is currently not supported, since no mechanism to guarantee a
+consistent state is currently in place. This is planned to be implemented in the
+future.
+***
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gerrit.images.gerritInit` | Image name of the Gerrit init container image | `k8sgerrit/gerrit-init` |
+| `gerrit.images.gerrit` | Image name of the Gerrit container image | `k8sgerrit/gerrit` |
+| `gerrit.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gerrit.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains. For more information, please refer to the following documents. [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints) | {} |
+| `gerrit.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gerrit.affinity` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gerrit.additionalAnnotations` | Additional annotations for the Pods | {} |
+| `gerrit.additionalPodLabels` | Additional labels for Pods | `{}` |
+| `gerrit.replicas` | Number of replica pods to deploy | `1` |
+| `gerrit.updatePartition` | Ordinal at which to start updating pods. Pods with a lower ordinal will not be updated. | `0` |
+| `gerrit.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 1` |
+| | | `requests.memory: 5Gi` |
+| | | `limits.cpu: 1` |
+| | | `limits.memory: 6Gi` |
+| `gerrit.persistence.enabled` | Whether to persist the Gerrit site | `true` |
+| `gerrit.persistence.size` | Storage size for persisted Gerrit site | `10Gi` |
+| `gerrit.probeScheme` | Scheme for probes, for example HTTPS | `nil` |
+| `gerrit.livenessProbe` | Configuration of the liveness probe timings | `{initialDelaySeconds: 30, periodSeconds: 5}` |
+| `gerrit.readinessProbe` | Configuration of the readiness probe timings | `{initialDelaySeconds: 5, periodSeconds: 1}` |
+| `gerrit.startupProbe` | Configuration of the startup probe timings | `{initialDelaySeconds: 10, periodSeconds: 5}` |
+| `gerrit.gracefulStopTimeout` | Time in seconds Kubernetes will wait until killing the pod during termination (has to be longer then Gerrit's httpd.gracefulStopTimeout to allow graceful shutdown of Gerrit) | `90` |
+| `gerrit.networkPolicy.ingress` | Custom ingress-network policy for gerrit pods | `nil` |
+| `gerrit.networkPolicy.egress` | Custom egress-network policy for gerrit pods | `nil` |
+| `gerrit.service.additionalAnnotations` | Additional annotations for the Service | `{}` |
+| `gerrit.service.loadBalancerSourceRanges` | The list of allowed IPs for the Service | `[]` |
+| `gerrit.service.type` | Which kind of Service to deploy | `NodePort` |
+| `gerrit.service.externalTrafficPolicy` | Specify how traffic from external is handled | `Cluster` |
+| `gerrit.service.http.port` | Port over which to expose HTTP | `80` |
+| `gerrit.service.ssh.enabled` | Whether to enable SSH | `false` |
+| `gerrit.service.ssh.port` | Port over which to expose SSH | `29418` |
+| `gerrit.keystore` | base64-encoded Java keystore (`cat keystore.jks \| base64`) to be used by Gerrit, when using SSL | `nil` |
+| `gerrit.index.type` | Index type used by Gerrit (either `lucene` or `elasticsearch`) | `lucene` |
+| `gerrit.pluginManagement.plugins` | List of Gerrit plugins to install | `[]` |
+| `gerrit.pluginManagement.plugins[0].name` | Name of plugin | `nil` |
+| `gerrit.pluginManagement.plugins[0].url` | Download url of plugin. If given the plugin will be downloaded, otherwise it will be installed from the gerrit.war-file. | `nil` |
+| `gerrit.pluginManagement.plugins[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version (optional) | `nil` |
+| `gerrit.pluginManagement.plugins[0].installAsLibrary` | Whether the plugin should be symlinked to the lib-dir in the Gerrit site. | `nil` |
+| `gerrit.pluginManagement.libs` | List of Gerrit library modules to install | `[]` |
+| `gerrit.pluginManagement.libs[0].name` | Name of the lib module | `nil` |
+| `gerrit.pluginManagement.libs[0].url` | Download url of lib module. | `nil` |
+| `gerrit.pluginManagement.libs[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version | `nil` |
+| `gerrit.pluginManagement.cache.enabled` | Whether to cache downloaded plugins | `false` |
+| `gerrit.pluginManagement.cache.size` | Size of the volume used to store cached plugins | `1Gi` |
+| `gerrit.priorityClassName` | Name of the PriorityClass to apply to the master pod | `nil` |
+| `gerrit.etc.config` | Map of config files (e.g. `gerrit.config`) that will be mounted to `$GERRIT_SITE/etc` by a ConfigMap | `{gerrit.config: ..., replication.config: ...}` [see here](#Gerrit-config-files) |
+| `gerrit.etc.secret` | Map of config files (e.g. `secure.config`) that will be mounted to `$GERRIT_SITE/etc` by a Secret | `{secure.config: ...}` [see here](#Gerrit-config-files) |
+| `gerrit.additionalConfigMaps` | Allows to mount additional ConfigMaps into a subdirectory of `$SITE/data` | `[]` |
+| `gerrit.additionalConfigMaps[*].name` | Name of the ConfigMap | `nil` |
+| `gerrit.additionalConfigMaps[*].subDir` | Subdirectory under `$SITE/data` into which the files should be symlinked | `nil` |
+| `gerrit.additionalConfigMaps[*].data` | Data of the ConfigMap. If not set, ConfigMap has to be created manually | `nil` |
+
+### Gerrit config files
+
+The gerrit chart provides a ConfigMap containing the configuration files
+used by Gerrit, e.g. `gerrit.config` and a Secret containing sensitive configuration
+like the `secure.config` to configure the Gerrit installation in the Gerrit
+component. The content of the config files can be set in the `values.yaml` under
+the keys `gerrit.etc.config` and `gerrit.etc.secret` respectively.
+The key has to be the filename (e.g. `gerrit.config`) and the file's contents
+the value. This way an arbitrary number of configuration files can be loaded into
+the `$GERRIT_SITE/etc`-directory, e.g. for plugins.
+All configuration options for Gerrit are described in detail in the
+[official documentation of Gerrit](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html).
+Some options however have to be set in a specified way for Gerrit to work as
+intended with the chart:
+
+- `gerrit.basePath`
+
+ Path to the directory containing the repositories. The chart mounts this
+ directory from a persistent volume to `/var/gerrit/git` in the container. For
+ Gerrit to find the correct directory, this has to be set to `git`.
+
+- `gerrit.serverId`
+
+  In Gerrit versions higher than 2.14, Gerrit needs a server ID, which is used
+  by NoteDB. Gerrit would usually generate a random ID on startup, but since
+  the gerrit.config file is read-only when mounted as a ConfigMap, this fails.
+  Thus the server ID has to be set manually!
+
+- `gerrit.canonicalWebUrl`
+
+ The canonical web URL has to be set to the Ingress host.
+
+- `httpd.listenURL`
+
+  This has to be set to `proxy-http://*:8080/` or `proxy-https://*:8080`,
+  depending on whether TLS is enabled in the Ingress or not, otherwise the
+  Jetty servlet will run into an endless redirect loop.
+
+- `httpd.gracefulStopTimeout` / `sshd.gracefulStopTimeout`
+
+  To enable graceful shutdown of the embedded jetty server and SSHD, a timeout
+  has to be set with this option. This will be the maximum time Gerrit will
+  wait for HTTP requests to finish before shutdown.
+
+- `container.user`
+
+ The technical user in the Gerrit container is called `gerrit`. Thus, this
+ value is required to be `gerrit`.
+
+- `container.javaHome`
+
+ This has to be set to `/usr/lib/jvm/java-11-openjdk-amd64`, since this is
+ the path of the Java installation in the container.
+
+- `container.javaOptions`
+
+  The maximum heap size has to be set, and its value has to be lower than the
+  memory resource limit set for the container (e.g. `-Xmx4g`). In your calculation,
+  allow memory for other components running in the container.
+
+To enable liveness- and readiness probes, the healthcheck plugin will be installed
+by default. Note that if a packaged or downloaded version of the healthcheck
+plugin is configured, that version will take precedence over the default
+version. The plugin is by default configured to disable the `querychanges` and
+`auth` healthchecks, since these would not work on a new and empty Gerrit server.
+The default configuration can be overwritten by adding the `healthcheck.config`
+file as a key-value pair to `gerrit.etc.config` as for every other configuration.
+
+SSH keys should be configured via the helm-chart using the `gerrit.etc.secret`
+map. Gerrit will create its own keys, if none are present in the site, but if
+multiple Gerrit pods are running, each Gerrit instance would have its own keys.
+Users accessing Gerrit via a load balancer would get issues due to changing
+host keys.
+
+### Installing Gerrit plugins
+
+There are several different ways to install plugins for Gerrit:
+
+- **RECOMMENDED: Package the plugins to install into the WAR-file containing Gerrit.**
+ This method provides the most stable way to install plugins, but requires to
+ use a custom built gerrit-war file and container images, if plugins are required
+ that are not part of the official `release.war`-file.
+
+- **Download and cache plugins.** The chart supports downloading the plugin files
+  and caching them in a separate volume that is shared between Gerrit-pods.
+  SHA1-sums are used to validate plugin-files and versions.
+
+- **Download plugins, but do not cache them.** This should only be used during
+ development to save resources (the shared volume). Each pod will download the
+ plugin-files on its own. Pods will fail to start up, if the download-URL is
+ not valid anymore at some point in time.
+
+## Upgrading the Chart
+
+To upgrade an existing installation of the gerrit chart, e.g. to install
+a newer chart version or to use an updated custom `values.yaml`-file, execute
+the following command:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm upgrade \
+ <release-name> \
+ ./gerrit \ # path to chart
+ -f <path-to-custom-values>.yaml
+```
+
+## Uninstalling the Chart
+
+To delete the chart from the cluster, use:
+
+```sh
+helm delete <release-name>
+```
diff --git a/charts/gerrit/docs/nfs-provisioner.md b/charts/gerrit/docs/nfs-provisioner.md
new file mode 100644
index 0000000..9e83d47
--- /dev/null
+++ b/charts/gerrit/docs/nfs-provisioner.md
@@ -0,0 +1,64 @@
+# Installing a NFS-provisioner
+
+Gerrit requires access to a persistent volume capable of running in
+`Read Write Many (RWM)`-mode to store the git repositories, since the repositories
+have to be accessed by multiple pods. One possibility to provide such volumes
+is to install a provisioner for NFS-volumes into the same Kubernetes-cluster.
+This document will guide through the process.
+
+The [Kubernetes external-storage project](https://github.com/kubernetes-incubator/external-storage)
+provides an out-of-tree dynamic [provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs)
+for NFS volumes. A chart exists for easy deployment of the project onto a
+Kubernetes cluster. The chart's sources can be found [here](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner).
+
+## Prerequisites
+
+This guide will use Helm to install the NFS-provisioner. Thus, Helm has to be
+installed.
+
+## Installing the nfs-server-provisioner chart
+
+A custom `values.yaml`-file containing a configuration tested with the
+gerrit charts can be found in the `supplements/nfs`-directory in the
+gerrit chart's root directory. In addition a file stating the tested
+version of the nfs-server-provisioner chart is present in the same directory.
+
+If needed, adapt the `values.yaml`-file for the nfs-server-provisioner chart
+further and then run:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts/gerrit/supplements/nfs
+helm install nfs \
+ stable/nfs-server-provisioner \
+ -f values.yaml \
+ --version $(cat VERSION)
+```
+
+For a description of the configuration options, refer to the
+[chart's documentation](https://github.com/helm/charts/blob/master/stable/nfs-server-provisioner/README.md).
+
+Here are some tips for configuring the nfs-server-provisioner chart to work with
+the gerrit chart:
+
+- Deploying more than 1 `replica` led to some reliability issues in tests and
+ should be further tested for now, if required.
+- The name of the StorageClass created for NFS-volumes has to be the same as the
+ one defined in the gerrit chart for `storageClasses.shared.name`
+- The StorageClass for NFS-volumes needs to have the parameter `mountOptions: vers=4.1`,
+ due to compatibility [issues](https://github.com/kubernetes-incubator/external-storage/issues/223)
+ with Ganesha.
+
+## Deleting the nfs-server-provisioner chart
+
+***note
+**Attention:** Never delete the nfs-server-provisioner chart, if there is still a
+PersistentVolumeClaim and Pods using a NFS-volume provisioned by the NFS server
+provisioner. This will lead to crashed pods, that will not be terminated correctly.
+***
+
+If no Pod or PVC is using a NFS-volume provisioned by the NFS server provisioner
+anymore, delete it like any other chart:
+
+```sh
+helm delete nfs
+```
diff --git a/charts/gerrit/supplements/nfs/VERSION b/charts/gerrit/supplements/nfs/VERSION
new file mode 100644
index 0000000..7dff5b8
--- /dev/null
+++ b/charts/gerrit/supplements/nfs/VERSION
@@ -0,0 +1 @@
+0.2.1
\ No newline at end of file
diff --git a/charts/gerrit/supplements/nfs/values.yaml b/charts/gerrit/supplements/nfs/values.yaml
new file mode 100644
index 0000000..a413d8a
--- /dev/null
+++ b/charts/gerrit/supplements/nfs/values.yaml
@@ -0,0 +1,42 @@
+# Deploying more than 1 `replica` led to some reliability issues in tests and
+# should be further tested for now, if required.
+replicaCount: 1
+
+image:
+ repository: quay.io/kubernetes_incubator/nfs-provisioner
+ tag: v1.0.9
+ pullPolicy: IfNotPresent
+
+service:
+ type: ClusterIP
+ nfsPort: 2049
+ mountdPort: 20048
+ rpcbindPort: 51413
+
+persistence:
+ enabled: true
+ storageClass: default
+ accessMode: ReadWriteOnce
+ size: 7.5Gi
+
+storageClass:
+ create: true
+ defaultClass: false
+ # The name of the StorageClass has to be the same as the one defined in the
+ # gerrit chart for `storageClasses.shared.name`
+ name: shared-storage
+ parameters:
+ # Required!
+ mountOptions: vers=4.1
+ reclaimPolicy: Delete
+
+rbac:
+ create: true
+
+resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
diff --git a/charts/gerrit/templates/NOTES.txt b/charts/gerrit/templates/NOTES.txt
new file mode 100644
index 0000000..b71b3b0
--- /dev/null
+++ b/charts/gerrit/templates/NOTES.txt
@@ -0,0 +1,4 @@
+A primary Gerrit instance has been deployed.
+==================================
+
+Gerrit may be accessed under: {{ .Values.ingress.host }}
diff --git a/charts/gerrit/templates/_helpers.tpl b/charts/gerrit/templates/_helpers.tpl
new file mode 100644
index 0000000..bace6fe
--- /dev/null
+++ b/charts/gerrit/templates/_helpers.tpl
@@ -0,0 +1,20 @@
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "gerrit.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create secret to access docker registry
+*/}}
+{{- define "imagePullSecret" }}
+{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.images.registry.name (printf "%s:%s" .Values.images.registry.ImagePullSecret.username .Values.images.registry.ImagePullSecret.password | b64enc) | b64enc }}
+{{- end }}
+
+{{/*
+Add '/' to registry if needed.
+*/}}
+{{- define "registry" -}}
+{{ if .Values.images.registry.name }}{{- printf "%s/" .Values.images.registry.name -}}{{end}}
+{{- end -}}
diff --git a/charts/gerrit/templates/gerrit.configmap.yaml b/charts/gerrit/templates/gerrit.configmap.yaml
new file mode 100644
index 0000000..cd1a6a9
--- /dev/null
+++ b/charts/gerrit/templates/gerrit.configmap.yaml
@@ -0,0 +1,80 @@
+{{- $root := . -}}
+
+{{- if not .Values.gerrit.etc.existingConfigMapName }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-configmap
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{- range $key, $value := .Values.gerrit.etc.config }}
+ {{ $key }}:
+{{ toYaml $value | indent 4 }}
+ {{- end }}
+ {{- if not (hasKey .Values.gerrit.etc.config "healthcheck.config") }}
+ healthcheck.config: |-
+ [healthcheck "auth"]
+ # On new instances there may be no users to use for healthchecks
+ enabled = false
+ [healthcheck "querychanges"]
+ # On new instances there won't be any changes to query
+ enabled = false
+ {{- end }}
+---
+{{- end }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ gerrit-init.yaml: |-
+ {{ if .Values.caCert -}}
+ caCertPath: /var/config/ca.crt
+ {{- end }}
+ pluginCacheEnabled: {{ .Values.gerrit.pluginManagement.cache.enabled }}
+ pluginCacheDir: /var/mnt/plugins
+ {{- if .Values.gerrit.pluginManagement.plugins }}
+ plugins:
+{{ toYaml .Values.gerrit.pluginManagement.plugins | indent 6}}
+ {{- end }}
+ {{- if .Values.gerrit.pluginManagement.libs }}
+ libs:
+{{ toYaml .Values.gerrit.pluginManagement.libs | indent 6}}
+ {{- end }}
+{{- range .Values.gerrit.additionalConfigMaps -}}
+{{- if .data }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $root.Release.Name }}-{{ .name }}
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ $root.Release.Name }}
+ chart: {{ template "gerrit.chart" $root }}
+ heritage: {{ $root.Release.Service }}
+ release: {{ $root.Release.Name }}
+ {{- if $root.Values.additionalLabels }}
+{{ toYaml $root.Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+{{ toYaml .data | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/gerrit/templates/gerrit.secrets.yaml b/charts/gerrit/templates/gerrit.secrets.yaml
new file mode 100644
index 0000000..72cfad3
--- /dev/null
+++ b/charts/gerrit/templates/gerrit.secrets.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-gerrit-secure-config
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{ if .Values.gerrit.keystore -}}
+ keystore: {{ .Values.gerrit.keystore }}
+ {{- end }}
+ {{- range $key, $value := .Values.gerrit.etc.secret }}
+ {{ $key }}: {{ $value | b64enc }}
+ {{- end }}
+type: Opaque
diff --git a/charts/gerrit/templates/gerrit.service.yaml b/charts/gerrit/templates/gerrit.service.yaml
new file mode 100644
index 0000000..fe16d45
--- /dev/null
+++ b/charts/gerrit/templates/gerrit.service.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Release.Name }}-gerrit-service
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.gerrit.service.additionalAnnotations }}
+ annotations:
+{{ toYaml .Values.gerrit.service.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ with .Values.gerrit.service }}
+ {{- if .loadBalancerSourceRanges -}}
+ loadBalancerSourceRanges:
+{{- range .loadBalancerSourceRanges }}
+ - {{ . | quote }}
+{{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .http.port }}
+ targetPort: 8080
+ {{- if .ssh.enabled }}
+ - name: ssh
+ port: {{ .ssh.port }}
+ targetPort: 29418
+ {{- end }}
+ type: {{ .type }}
+ externalTrafficPolicy: {{ .externalTrafficPolicy }}
+ {{- end }}
+ selector:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+
diff --git a/charts/gerrit/templates/gerrit.stateful-set.yaml b/charts/gerrit/templates/gerrit.stateful-set.yaml
new file mode 100644
index 0000000..2fc33c9
--- /dev/null
+++ b/charts/gerrit/templates/gerrit.stateful-set.yaml
@@ -0,0 +1,299 @@
+{{- $root := . -}}
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ .Release.Name }}-gerrit-stateful-set
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ serviceName: {{ .Release.Name }}-gerrit-service
+ replicas: {{ .Values.gerrit.replicas }}
+ updateStrategy:
+ rollingUpdate:
+ partition: {{ .Values.gerrit.updatePartition }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ {{- if .Values.gerrit.additionalPodLabels }}
+{{ toYaml .Values.gerrit.additionalPodLabels | indent 8 }}
+ {{- end }}
+ annotations:
+ chartRevision: "{{ .Release.Revision }}"
+ {{- if .Values.gerrit.additionalAnnotations }}
+{{ toYaml .Values.gerrit.additionalAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.gerrit.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.priorityClassName }}
+ priorityClassName: {{ . }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ .Values.gerrit.gracefulStopTimeout }}
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ - name: gerrit-init
+ image: {{ template "registry" . }}{{ .Values.gerrit.images.gerritInit }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: gerrit-init-config
+ mountPath: "/var/config/gerrit-init.yaml"
+ subPath: gerrit-init.yaml
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- if and .Values.gerrit.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ mountPath: "/var/mnt/plugins"
+ {{- end }}
+ {{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+ - name: gerrit-index-config
+ mountPath: "/var/mnt/index"
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ subPath: ca.crt
+ mountPath: "/var/config/ca.crt"
+ {{- end }}
+ {{- range .Values.gerrit.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ containers:
+ - name: gerrit
+ image: {{ template "registry" . }}{{ .Values.gerrit.images.gerrit }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/ash"
+ - "-c"
+ - "kill -2 $(pidof java) && tail --pid=$(pidof java) -f /dev/null"
+ ports:
+ - name: gerrit-port
+ containerPort: 8080
+ {{- if .Values.gerrit.service.ssh.enabled }}
+ - name: gerrit-ssh
+ containerPort: 29418
+ {{- end }}
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+ - name: gerrit-index-config
+ mountPath: "/var/mnt/index"
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{- range .Values.gerrit.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ resources:
+{{ toYaml .Values.gerrit.resources | indent 10 }}
+ livenessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: gerrit-port
+{{- if .Values.gerrit.probeScheme }}
+ scheme: {{ .Values.gerrit.probeScheme }}
+{{- end }}
+{{ toYaml .Values.gerrit.livenessProbe | indent 10 }}
+ readinessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: gerrit-port
+{{- if .Values.gerrit.probeScheme }}
+ scheme: {{ .Values.gerrit.probeScheme }}
+{{- end }}
+{{ toYaml .Values.gerrit.readinessProbe | indent 10 }}
+ startupProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: gerrit-port
+{{- if .Values.gerrit.probeScheme }}
+ scheme: {{ .Values.gerrit.probeScheme }}
+{{- end }}
+{{ toYaml .Values.gerrit.startupProbe | indent 10 }}
+ volumes:
+ {{ if not .Values.gerrit.persistence.enabled -}}
+ - name: gerrit-site
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.gerrit.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-plugin-cache-pvc
+ {{- end }}
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ - name: gerrit-init-config
+ configMap:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ {{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+ - name: gerrit-index-config
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-gerrit-index-config-pvc
+ {{- end }}
+ - name: gerrit-config
+ configMap:
+ name: {{ if .Values.gerrit.etc.existingConfigMapName }}{{ .Values.gerrit.etc.existingConfigMapName }}{{ else }} {{ .Release.Name }}-gerrit-configmap{{ end }}
+ - name: gerrit-secure-config
+ secret:
+ secretName: {{ .Release.Name }}-gerrit-secure-config
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ secret:
+ secretName: {{ .Release.Name }}-tls-ca
+ {{- end }}
+ {{- range .Values.gerrit.additionalConfigMaps }}
+ - name: {{ .name }}
+ configMap:
+ name: {{ if .data }}{{ $root.Release.Name }}-{{ .name }}{{ else }}{{ .name }}{{ end }}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
+ {{ if .Values.gerrit.persistence.enabled -}}
+ volumeClaimTemplates:
+ - metadata:
+ name: gerrit-site
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.gerrit.persistence.size }}
+ storageClassName: {{ .Values.storageClasses.default.name }}
+ {{- end }}
diff --git a/charts/gerrit/templates/gerrit.storage.yaml b/charts/gerrit/templates/gerrit.storage.yaml
new file mode 100644
index 0000000..1d85fc6
--- /dev/null
+++ b/charts/gerrit/templates/gerrit.storage.yaml
@@ -0,0 +1,45 @@
+{{- if and .Values.gerrit.pluginManagement.cache.enabled }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-plugin-cache-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gerrit.pluginManagement.cache.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
+{{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-gerrit-index-config-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 10Mi
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/gerrit/templates/git-gc.cronjob.yaml b/charts/gerrit/templates/git-gc.cronjob.yaml
new file mode 100644
index 0000000..8230e5d
--- /dev/null
+++ b/charts/gerrit/templates/git-gc.cronjob.yaml
@@ -0,0 +1,132 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-git-gc
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.gitGC.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.gitGC.additionalPodLabels }}
+{{ toYaml .Values.gitGC.additionalPodLabels | indent 12 }}
+ {{- end }}
+ annotations:
+ cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
+ spec:
+ {{- with .Values.gitGC.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.gitGC.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.gitGC.affinity }}
+ affinity:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ restartPolicy: OnFailure
+ securityContext:
+ runAsUser: 1000
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: git-gc
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ template "registry" . }}{{ .Values.gitGC.image }}:{{ .Values.images.version }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ resources:
+{{ toYaml .Values.gitGC.resources | indent 14 }}
+ volumeMounts:
+ - name: git-repositories
+ mountPath: "/var/gerrit/git"
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/log/git"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ volumes:
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
diff --git a/charts/gerrit/templates/git-gc.storage.yaml b/charts/gerrit/templates/git-gc.storage.yaml
new file mode 100644
index 0000000..c69a647
--- /dev/null
+++ b/charts/gerrit/templates/git-gc.storage.yaml
@@ -0,0 +1,22 @@
+{{ if .Values.gitGC.logging.persistence.enabled -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-git-gc-logs-pvc
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.gitGC.logging.persistence.size }}
+ storageClassName: {{ .Values.storageClasses.default.name }}
+{{- end }}
diff --git a/charts/gerrit/templates/global.secrets.yaml b/charts/gerrit/templates/global.secrets.yaml
new file mode 100644
index 0000000..b2c3d5d
--- /dev/null
+++ b/charts/gerrit/templates/global.secrets.yaml
@@ -0,0 +1,18 @@
+{{ if .Values.caCert -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-tls-ca
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ ca.crt: {{ .Values.caCert | b64enc }}
+type: Opaque
+{{- end }}
diff --git a/charts/gerrit/templates/image-pull.secret.yaml b/charts/gerrit/templates/image-pull.secret.yaml
new file mode 100644
index 0000000..d107472
--- /dev/null
+++ b/charts/gerrit/templates/image-pull.secret.yaml
@@ -0,0 +1,9 @@
+{{ if and .Values.images.registry.ImagePullSecret.name .Values.images.registry.ImagePullSecret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.images.registry.ImagePullSecret.name }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ template "imagePullSecret" . }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/gerrit/templates/ingress.yaml b/charts/gerrit/templates/ingress.yaml
new file mode 100644
index 0000000..eb19655
--- /dev/null
+++ b/charts/gerrit/templates/ingress.yaml
@@ -0,0 +1,64 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ .Release.Name }}-gerrit-ingress
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.ingress.additionalLabels }}
+{{ toYaml .Values.ingress.additionalLabels | indent 4 }}
+ {{- end }}
+ annotations:
+ nginx.ingress.kubernetes.io/proxy-body-size: {{ .Values.ingress.maxBodySize | default "50m" }}
+ {{- if .Values.ingress.additionalAnnotations }}
+{{ toYaml .Values.ingress.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ if .Values.ingress.tls.enabled -}}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.host }}
+ {{ if .Values.ingress.tls.secret.create -}}
+ secretName: {{ .Release.Name }}-gerrit-tls-secret
+ {{- else }}
+ secretName: {{ .Values.ingress.tls.secret.name }}
+ {{- end }}
+ {{- end }}
+ rules:
+ - host: {{required "A host URL is required for the Gerrit Ingress. Please set 'ingress.host'" .Values.ingress.host }}
+ http:
+ paths:
+ - pathType: Prefix
+ path: /
+ backend:
+ service:
+ name: {{ .Release.Name }}-gerrit-service
+ port:
+ number: {{ .Values.gerrit.service.http.port }}
+{{- end }}
+---
+{{ if and .Values.ingress.tls.enabled .Values.ingress.tls.secret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-gerrit-tls-secret
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.ingress.additionalLabels }}
+{{ toYaml .Values.ingress.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ {{ with .Values.ingress.tls -}}
+ tls.crt: {{ .cert | b64enc }}
+ tls.key: {{ .key | b64enc }}
+ {{- end }}
+{{- end }}
diff --git a/charts/gerrit/templates/log-cleaner.cronjob.yaml b/charts/gerrit/templates/log-cleaner.cronjob.yaml
new file mode 100644
index 0000000..c1314f1
--- /dev/null
+++ b/charts/gerrit/templates/log-cleaner.cronjob.yaml
@@ -0,0 +1,65 @@
+{{- if and .Values.logStorage.enabled .Values.logStorage.cleanup.enabled }}
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-log-cleaner
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.logStorage.cleanup.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.logStorage.cleanup.additionalPodLabels }}
+{{ toYaml .Values.logStorage.cleanup.additionalPodLabels | indent 12 }}
+ {{- end }}
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: log-cleaner
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ find /var/logs/ \
+ -mindepth 1 \
+ -type f \
+ -mtime +{{ .Values.logStorage.cleanup.retentionDays }} \
+ -print \
+ -delete
+ find /var/logs/ -type d -empty -delete
+ resources:
+{{ toYaml .Values.logStorage.cleanup.resources | indent 14 }}
+ volumeMounts:
+ - name: logs
+ mountPath: "/var/logs"
+ volumes:
+ - name: logs
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+{{- end }}
diff --git a/charts/gerrit/templates/netpol.yaml b/charts/gerrit/templates/netpol.yaml
new file mode 100644
index 0000000..c0cbc4d
--- /dev/null
+++ b/charts/gerrit/templates/netpol.yaml
@@ -0,0 +1,122 @@
+{{ if .Values.networkPolicies.enabled -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: {{ .Release.Name }}-default-deny-all
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.networkPolicies.additionalLabels }}
+{{ toYaml .Values.networkPolicies.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress: []
+ egress: []
+---
+{{ if .Values.networkPolicies.dnsPorts -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ .Release.Name }}-allow-dns-access
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.networkPolicies.additionalLabels }}
+{{ toYaml .Values.networkPolicies.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ {{ range .Values.networkPolicies.dnsPorts -}}
+ - port: {{ . }}
+ protocol: UDP
+ - port: {{ . }}
+ protocol: TCP
+ {{ end }}
+{{- end }}
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-allow-external
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+ - ports:
+ - port: 8080
+ from: []
+---
+{{ if or .Values.gerrit.networkPolicy.ingress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-custom-ingress-policies
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+{{ toYaml .Values.gerrit.networkPolicy.ingress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.gerrit.networkPolicy.egress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-custom-egress-policies
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ egress:
+{{ toYaml .Values.gerrit.networkPolicy.egress | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/gerrit/templates/nfs.configmap.yaml b/charts/gerrit/templates/nfs.configmap.yaml
new file mode 100644
index 0000000..dd2c3dd
--- /dev/null
+++ b/charts/gerrit/templates/nfs.configmap.yaml
@@ -0,0 +1,28 @@
+{{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-nfs-configmap
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ idmapd.conf: |-
+ [General]
+
+ Verbosity = 0
+ Pipefs-Directory = /run/rpc_pipefs
+ # set your own domain here, if it differs from FQDN minus hostname
+ Domain = {{ .Values.nfsWorkaround.idDomain }}
+
+ [Mapping]
+
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+{{- end }}
diff --git a/charts/gerrit/templates/storage.pvc.yaml b/charts/gerrit/templates/storage.pvc.yaml
new file mode 100644
index 0000000..b262402
--- /dev/null
+++ b/charts/gerrit/templates/storage.pvc.yaml
@@ -0,0 +1,45 @@
+{{- if not .Values.gitRepositoryStorage.externalPVC.use }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-git-repositories-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gitRepositoryStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
+{{- if and .Values.logStorage.enabled (not .Values.logStorage.externalPVC.use) }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-log-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.logStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/gerrit/templates/storageclasses.yaml b/charts/gerrit/templates/storageclasses.yaml
new file mode 100644
index 0000000..552cd6a
--- /dev/null
+++ b/charts/gerrit/templates/storageclasses.yaml
@@ -0,0 +1,53 @@
+{{ if .Values.storageClasses.default.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.default.name }}
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.default.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.default.reclaimPolicy }}
+{{ if .Values.storageClasses.default.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.default.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+mountOptions:
+{{- range $value := .Values.storageClasses.default.mountOptions }}
+ - {{ $value }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.default.allowVolumeExpansion }}
+{{- end }}
+{{- end }}
+---
+{{ if .Values.storageClasses.shared.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.shared.name }}
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.shared.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.shared.reclaimPolicy }}
+{{ if .Values.storageClasses.shared.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.shared.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+mountOptions:
+{{- range $value := .Values.storageClasses.shared.mountOptions }}
+ - {{ $value }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.shared.allowVolumeExpansion }}
+{{- end }}
+{{- end }}
diff --git a/charts/gerrit/values.yaml b/charts/gerrit/values.yaml
new file mode 100644
index 0000000..c23f55c
--- /dev/null
+++ b/charts/gerrit/values.yaml
@@ -0,0 +1,338 @@
+images:
+ busybox:
+ registry: docker.io
+ tag: latest
+ # Registry used for container images created by this project
+ registry:
+ # The registry name must NOT contain a trailing slash
+ name:
+ ImagePullSecret:
+ # Leave blank, if no ImagePullSecret is needed.
+ name: image-pull-secret
+ # If set to false, the gerrit chart expects either a ImagePullSecret
+ # with the name configured above to be present on the cluster or that no
+ # credentials are needed.
+ create: false
+ username:
+ password:
+ version: latest
+ imagePullPolicy: Always
+ # Additional ImagePullSecrets that already exist and should be used by the
+ # pods of this chart. E.g. to pull busybox from dockerhub.
+ additionalImagePullSecrets: []
+
+# Additional labels that should be applied to all resources
+additionalLabels: {}
+
+storageClasses:
+ # Storage class used for storing logs and other pod-specific persisted data
+ default:
+ # If create is set to false, an existing StorageClass with the given
+ # name is expected to exist in the cluster. Setting create to true will
+ # create a storage class with the parameters given below.
+ name: default
+ create: false
+ provisioner: kubernetes.io/aws-ebs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ type: gp2
+ fsType: ext4
+ mountOptions: []
+ allowVolumeExpansion: false
+ # Storage class used for storing git repositories. Has to provide RWM access.
+ shared:
+ # If create is set to false, an existing StorageClass with RWM access
+ # mode and the given name has to be provided.
+ name: shared-storage
+ create: false
+ provisioner: nfs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ mountOptions: vers=4.1
+ mountOptions: []
+ allowVolumeExpansion: false
+
+
+nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idDomain: localdomain.com
+
+
+networkPolicies:
+ enabled: false
+ dnsPorts:
+ - 53
+ - 8053
+
+
+gitRepositoryStorage:
+ externalPVC:
+ use: false
+ name: git-repositories-pvc
+ size: 5Gi
+
+logStorage:
+ enabled: false
+ externalPVC:
+ use: false
+ name: gerrit-logs-pvc
+ size: 5Gi
+ cleanup:
+ enabled: false
+ additionalPodLabels: {}
+ schedule: "0 0 * * *"
+ retentionDays: 14
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+caCert:
+
+ingress:
+ enabled: false
+ host:
+ # The maximum body size to allow for requests. Use "0" to allow unlimited
+ # request body sizes.
+ maxBodySize: 50m
+ additionalAnnotations:
+ kubernetes.io/ingress.class: nginx
+ # nginx.ingress.kubernetes.io/server-alias: example.com
+ # nginx.ingress.kubernetes.io/whitelist-source-range: xxx.xxx.xxx.xxx
+ tls:
+ enabled: false
+ secret:
+ create: true
+ # `name` will only be used, if `create` is set to false to bind an
+ # existing secret. Otherwise the name will be automatically generated to
+ # avoid conflicts between multiple chart installations.
+ name:
+ # `cert` and `key` will only be used, if the secret will be created by
+ # this chart.
+ cert: |-
+ -----BEGIN CERTIFICATE-----
+
+ -----END CERTIFICATE-----
+ key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+
+ -----END RSA PRIVATE KEY-----
+
+
+gitGC:
+ image: k8sgerrit/git-gc
+
+ tolerations: []
+ nodeSelector: {}
+ affinity: {}
+ additionalPodLabels: {}
+
+ schedule: 0 6,18 * * *
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+ logging:
+ persistence:
+ enabled: true
+ size: 1Gi
+
+
+gerrit:
+ images:
+ gerritInit: k8sgerrit/gerrit-init
+ gerrit: k8sgerrit/gerrit
+
+ tolerations: []
+ topologySpreadConstraints: {}
+ nodeSelector: {}
+ affinity: {}
+ additionalAnnotations: {}
+ additionalPodLabels: {}
+
+ replicas: 1
+ updatePartition: 0
+
+ # The memory limit has to be higher than the configured heap size for Java!
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ persistence:
+ enabled: true
+ size: 10Gi
+
+ # If no value for probeScheme, the probe will use the default HTTP
+ probeScheme: HTTP
+
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 5
+
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 1
+
+ startupProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+ gracefulStopTimeout: 90
+
+ # The general NetworkPolicy rules implemented by this chart may be too restrictive
+ # for some setups, e.g. when trying to replicate to a Gerrit replica. Here
+ # custom rules may be added to whitelist some additional connections.
+ networkPolicy:
+ ingress: []
+ egress: []
+ # An example for an egress rule to allow replication to a Gerrit replica
+ # installed with the gerrit-replica setup in the same cluster and namespace
+ # by using the service as the replication destination
+ # (e.g. http://gerrit-replica-git-backend-service:80/git/${name}.git):
+ #
+ # - to:
+ # - podSelector:
+ # matchLabels:
+ # app: git-backend
+
+ service:
+ additionalAnnotations: {}
+ loadBalancerSourceRanges: []
+ type: NodePort
+ externalTrafficPolicy: Cluster
+ http:
+ port: 80
+ ssh:
+ enabled: false
+ port: 29418
+
+ # `gerrit.keystore` expects a base64-encoded Java-keystore
+ # Since Java keystores are binary files, adding the unencoded content and
+ # automatic encoding using helm does not work here.
+ keystore:
+
+ index:
+ # Either `lucene` or `elasticsearch`
+ type: lucene
+
+ pluginManagement:
+ plugins: []
+ # A plugin packaged in the gerrit.war-file
+ # - name: download-commands
+
+ # A plugin packaged in the gerrit.war-file that will also be installed as a
+ # lib
+ # - name: replication
+ # installAsLibrary: true
+
+ # A plugin that will be downloaded on startup
+ # - name: delete-project
+ # url: https://example.com/gerrit-plugins/delete-project.jar
+ # sha1:
+ # installAsLibrary: false
+
+ # Only downloaded plugins will be cached. This will be ignored, if no plugins
+ # are downloaded.
+ libs: []
+ cache:
+ enabled: false
+ size: 1Gi
+
+ priorityClassName:
+
+ etc:
+ # If provided config section below will be ignored.
+ existingConfigMapName: ""
+ # Some values are expected to have a specific value for the deployment installed
+ # by this chart to work. These are marked with `# FIXED`.
+ # Do not change them!
+ config:
+ gerrit.config: |-
+ [gerrit]
+ basePath = git # FIXED
+ serverId = gerrit-1
+ # The canonical web URL has to be set to the Ingress host, if an Ingress
+ # is used. If a LoadBalancer-service is used, this should be set to the
+ # LoadBalancer's external IP. This can only be done manually after installing
+ # the chart, when you know the external IP the LoadBalancer got from the
+ # cluster.
+ canonicalWebUrl = http://example.com/
+ disableReverseDnsLookup = true
+ [index]
+ type = LUCENE
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ # If using an ingress use proxy-http or proxy-https
+ listenUrl = proxy-http://*:8080/
+ requestLog = true
+ gracefulStopTimeout = 1m
+ [sshd]
+ listenAddress = off
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [cache]
+ directory = cache
+ [container]
+ user = gerrit # FIXED
+ javaHome = /usr/lib/jvm/java-17-openjdk # FIXED
+ javaOptions = -Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore # FIXED
+ javaOptions = -Xms200m
+ # Has to be lower than 'gerrit.resources.limits.memory'. Also
+ # consider memories used by other applications in the container.
+ javaOptions = -Xmx4g
+
+ replication.config: |-
+ [gerrit]
+ autoReload = false
+ replicateOnStartup = true
+ defaultForceUpdate = true
+
+ # [remote "replica"]
+ # url = http://gerrit-replica.example.com/git/${name}.git
+ # replicationDelay = 0
+ # timeout = 30
+
+ secret:
+ secure.config: |-
+ # Password for the keystore added as value for 'gerritReplica.keystore'
+ # Only needed, if SSL is enabled.
+ #[httpd]
+ # sslKeyPassword = gerrit
+
+ # Credentials for replication targets
+ # [remote "replica"]
+ # username = git
+ # password = secret
+
+ # ssh_host_ecdsa_key: |-
+ # -----BEGIN EC PRIVATE KEY-----
+
+ # -----END EC PRIVATE KEY-----
+
+ # ssh_host_ecdsa_key.pub: ecdsa-sha2-nistp256...
+
+ additionalConfigMaps:
+ # - name:
+ # subDir:
+ # data:
+ # file.txt: test
diff --git a/charts/headscale-controller/Chart.yaml b/charts/headscale-controller/Chart.yaml
new file mode 100644
index 0000000..23162f0
--- /dev/null
+++ b/charts/headscale-controller/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: headscale-controller
+description: A Helm chart for headscale-controller
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/headscale-controller/templates/crds.yaml b/charts/headscale-controller/templates/crds.yaml
new file mode 100644
index 0000000..4dc4db0
--- /dev/null
+++ b/charts/headscale-controller/templates/crds.yaml
@@ -0,0 +1,57 @@
+{{ if .Values.installCRDs }}
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.9.2
+ creationTimestamp: null
+ name: headscaleusers.headscale.dodo.cloud
+spec:
+ group: headscale.dodo.cloud
+ names:
+ kind: HeadscaleUser
+ listKind: HeadscaleUserList
+ plural: headscaleusers
+ singular: headscaleuser
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: HeadscaleUser is the Schema for the headscaleusers API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HeadscaleUserSpec defines the desired state of HeadscaleUser
+ properties:
+ headscaleAddress:
+ type: string
+ name:
+ type: string
+ preAuthKey:
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ type: object
+ status:
+ description: HeadscaleUserStatus defines the observed state of HeadscaleUser
+ properties:
+ ready:
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+{{ end }}
diff --git a/charts/headscale-controller/templates/install.yaml b/charts/headscale-controller/templates/install.yaml
new file mode 100644
index 0000000..7c2c129
--- /dev/null
+++ b/charts/headscale-controller/templates/install.yaml
@@ -0,0 +1,282 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: controller-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: controller-leader-election-role
+ namespace: {{ .Release.Namespace }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: controller-manager-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - headscale.dodo.cloud
+ resources:
+ - headscaleusers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - headscale.dodo.cloud
+ resources:
+ - headscaleusers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - headscale.dodo.cloud
+ resources:
+ - headscaleusers/status
+ verbs:
+ - get
+ - patch
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: controller-metrics-reader
+rules:
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: controller-proxy-role
+rules:
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: controller-leader-election-rolebinding
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: controller-leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: controller-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: controller-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: controller-manager-role
+subjects:
+- kind: ServiceAccount
+ name: controller-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: controller-proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: controller-proxy-role
+subjects:
+- kind: ServiceAccount
+ name: controller-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+data:
+ controller_manager_config.yaml: |
+ apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
+ kind: ControllerManagerConfig
+ health:
+ healthProbeBindAddress: :8081
+ metrics:
+ bindAddress: 127.0.0.1:8080
+ webhook:
+ port: 9443
+ leaderElection:
+ leaderElect: true
+ resourceName: 798a733c.dodo.cloud
+ # leaderElectionReleaseOnCancel defines if the leader should step down voluntarily
+ # when the Manager ends. This requires the binary to immediately end when the
+ # Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
+ # speeds up voluntary leader transitions as the new leader doesn't have to wait
+ # LeaseDuration time first.
+ # In the default scaffold provided, the program ends immediately after
+ # the manager stops, so would be fine to enable this option. However,
+ # if you are doing, or intend to do, any operation such as performing cleanups
+ # after the manager stops then its usage might be unsafe.
+ # leaderElectionReleaseOnCancel: true
+kind: ConfigMap
+metadata:
+ name: controller-manager-config
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ control-plane: controller-manager
+ name: controller-controller-manager-metrics-service
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - name: https
+ port: 8443
+ protocol: TCP
+ targetPort: https
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ control-plane: controller-manager
+ name: controller-controller-manager
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ template:
+ metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
+ labels:
+ control-plane: controller-manager
+ spec:
+ containers:
+ - args:
+ - --secure-listen-address=0.0.0.0:8443
+ - --upstream=http://127.0.0.1:8080/
+ - --logtostderr=true
+ - --v=0
+ image: {{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}
+ name: kube-rbac-proxy
+ ports:
+ - containerPort: 8443
+ name: https
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 500m
+ memory: 128Mi
+ requests:
+ cpu: 5m
+ memory: 64Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ - args:
+ - --health-probe-bind-address=:8081
+ - --metrics-bind-address=127.0.0.1:8080
+ - --leader-elect
+ command:
+ - /manager
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ name: manager
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources:
+ limits:
+ cpu: 500m
+ memory: 128Mi
+ requests:
+ cpu: 10m
+ memory: 64Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: controller-controller-manager
+ terminationGracePeriodSeconds: 10
diff --git a/charts/headscale-controller/values.yaml b/charts/headscale-controller/values.yaml
new file mode 100644
index 0000000..2cb2dfc
--- /dev/null
+++ b/charts/headscale-controller/values.yaml
@@ -0,0 +1,10 @@
+image:
+ repository: giolekva/headscale-controller
+ tag: latest
+ pullPolicy: Always
+kubeRBACProxy:
+ image:
+ repository: "gcr.io/kubebuilder/kube-rbac-proxy"
+ tag: v0.13.0
+ pullPolicy: IfNotPresent
+installCRDs: false
diff --git a/charts/headscale-user/.helmignore b/charts/headscale-user/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/headscale-user/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/headscale-user/Chart.yaml b/charts/headscale-user/Chart.yaml
new file mode 100644
index 0000000..2b8cdf9
--- /dev/null
+++ b/charts/headscale-user/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: headscale-user
+description: A Helm chart to create headscale users
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/headscale-user/templates/headscale-user.yaml b/charts/headscale-user/templates/headscale-user.yaml
new file mode 100644
index 0000000..8fc998f
--- /dev/null
+++ b/charts/headscale-user/templates/headscale-user.yaml
@@ -0,0 +1,11 @@
+apiVersion: headscale.dodo.cloud/v1
+kind: HeadscaleUser
+metadata:
+ name: {{ .Values.username }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ headscaleAddress: {{ .Values.headscaleApiAddress }}
+ name: {{ .Values.username }}
+ preAuthKey:
+ enabled: {{ .Values.preAuthKey.enabled }}
+ secretName: {{ .Values.preAuthKey.secretName }}
diff --git a/charts/headscale-user/values.yaml b/charts/headscale-user/values.yaml
new file mode 100644
index 0000000..c91e8d8
--- /dev/null
+++ b/charts/headscale-user/values.yaml
@@ -0,0 +1,6 @@
+username: foo
+headscaleApiAddress: headscale-api.example.com
+preAuthKey:
+ enabled: false
+ secretName: foo-secret
+
diff --git a/charts/headscale/Chart.yaml b/charts/headscale/Chart.yaml
new file mode 100644
index 0000000..f1688de
--- /dev/null
+++ b/charts/headscale/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: headscale
+description: A Helm chart to run Headscale on PCloud
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/headscale/templates/_helpers.tpl b/charts/headscale/templates/_helpers.tpl
new file mode 100644
index 0000000..063b2b4
--- /dev/null
+++ b/charts/headscale/templates/_helpers.tpl
@@ -0,0 +1,7 @@
+{{- define "clientSecret" -}}
+{{- if .Values.oauth2.clientSecret -}}
+{{- .Values.oauth2.clientSecret -}}
+{{- else -}}
+{{- randAlphaNum 32 -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/headscale/templates/config.yaml b/charts/headscale/templates/config.yaml
new file mode 100644
index 0000000..7c007ba
--- /dev/null
+++ b/charts/headscale/templates/config.yaml
@@ -0,0 +1,302 @@
+apiVersion: dodo.cloud.dodo.cloud/v1
+kind: ResourceRenderer
+metadata:
+ name: config-renderer
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ .Values.oauth2.secretName }}
+ resourceTemplate: |
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: config
+ namespace: {{ .Release.Namespace }}
+ data:
+ config.yaml: |
+ # headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
+ #
+ # - `/etc/headscale`
+ # - `~/.headscale`
+ # - current working directory
+
+ # The url clients will connect to.
+ # Typically this will be a domain like:
+ #
+ # https://myheadscale.example.com:443
+ #
+ server_url: https://{{ .Values.domain }}
+
+ # Address to listen to / bind to on the server
+ #
+ # For production:
+ listen_addr: 0.0.0.0:8080
+ # listen_addr: 127.0.0.1:8080
+
+ # Address to listen to /metrics, you may want
+ # to keep this endpoint private to your internal
+ # network
+ #
+ metrics_listen_addr: 0.0.0.0:9090
+ # metrics_listen_addr: 127.0.0.1:9090
+
+ # Address to listen for gRPC.
+ # gRPC is used for controlling a headscale server
+ # remotely with the CLI
+ # Note: Remote access _only_ works if you have
+ # valid certificates.
+ #
+ # For production:
+ grpc_listen_addr: 0.0.0.0:50443
+ # grpc_listen_addr: 127.0.0.1:50443
+
+ # Allow the gRPC admin interface to run in INSECURE
+ # mode. This is not recommended as the traffic will
+ # be unencrypted. Only enable if you know what you
+ # are doing.
+ grpc_allow_insecure: false
+
+ # Private key used to encrypt the traffic between headscale
+ # and Tailscale clients.
+ # The private key file will be autogenerated if it's missing.
+ #
+ # For production:
+ # /var/lib/headscale/private.key
+ private_key_path: /headscale/data/private.key
+
+ # The Noise section includes specific configuration for the
+ # TS2021 Noise protocol
+ noise:
+ # The Noise private key is used to encrypt the
+ # traffic between headscale and Tailscale clients when
+ # using the new Noise-based protocol. It must be different
+ # from the legacy private key.
+ #
+ # For production:
+ # private_key_path: /var/lib/headscale/noise_private.key
+ private_key_path: /headscale/data/noise_private.key
+
+ # List of IP prefixes to allocate tailaddresses from.
+ # Each prefix consists of either an IPv4 or IPv6 address,
+ # and the associated prefix length, delimited by a slash.
+ ip_prefixes:
+ # - fd7a:115c:a1e0::/48
+ - 100.64.0.0/10
+
+ # DERP is a relay system that Tailscale uses when a direct
+ # connection cannot be established.
+ # https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
+ #
+ # headscale needs a list of DERP servers that can be presented
+ # to the clients.
+ derp:
+ server:
+ # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
+ # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
+ enabled: false
+
+ # Region ID to use for the embedded DERP server.
+ # The local DERP prevails if the region ID collides with other region ID coming from
+ # the regular DERP config.
+ region_id: 999
+
+ # Region code and name are displayed in the Tailscale UI to identify a DERP region
+ region_code: "headscale"
+ region_name: "Headscale Embedded DERP"
+
+ # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
+ # When the embedded DERP server is enabled stun_listen_addr MUST be defined.
+ #
+ # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
+ stun_listen_addr: "0.0.0.0:3478"
+
+ # List of externally available DERP maps encoded in JSON
+ urls:
+ - https://controlplane.tailscale.com/derpmap/default
+
+ # Locally available DERP map files encoded in YAML
+ #
+ # This option is mostly interesting for people hosting
+ # their own DERP servers:
+ # https://tailscale.com/kb/1118/custom-derp-servers/
+ #
+ # paths:
+ # - /etc/headscale/derp-example.yaml
+ paths: []
+
+ # If enabled, a worker will be set up to periodically
+ # refresh the given sources and update the derpmap
+ # will be set up.
+ auto_update_enabled: true
+
+ # How often should we check for DERP updates?
+ update_frequency: 24h
+
+ # Disables the automatic check for headscale updates on startup
+ disable_check_updates: true
+
+ # Time before an inactive ephemeral node is deleted?
+ ephemeral_node_inactivity_timeout: 30m
+
+ # Period to check for node updates within the tailnet. A value too low will severely affect
+ # CPU consumption of Headscale. A value too high (over 60s) will cause problems
+ # for the nodes, as they won't get updates or keep alive messages frequently enough.
+ # In case of doubts, do not touch the default 10s.
+ node_update_check_interval: 10s
+
+ # SQLite config
+ db_type: sqlite3
+
+ # For production:
+ # db_path: /var/lib/headscale/db.sqlite
+ db_path: /headscale/data/db.sqlite
+
+ # # Postgres config
+ # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
+ # db_type: postgres
+ # db_host: localhost
+ # db_port: 5432
+ # db_name: headscale
+ # db_user: foo
+ # db_pass: bar
+
+ # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
+ # in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
+ # db_ssl: false
+
+ ### TLS configuration
+ #
+ ## Let's encrypt / ACME
+ #
+ # headscale supports automatically requesting and setting up
+ # TLS for a domain with Let's Encrypt.
+ #
+ # URL to ACME directory
+ acme_url: https://acme-v02.api.letsencrypt.org/directory
+
+ # Email to register with ACME provider
+ acme_email: ""
+
+ # Domain name to request a TLS certificate for:
+ tls_letsencrypt_hostname: ""
+
+ # Path to store certificates and metadata needed by
+ # letsencrypt
+ # For production:
+ # tls_letsencrypt_cache_dir: /var/lib/headscale/cache
+ tls_letsencrypt_cache_dir: ./cache
+
+ # Type of ACME challenge to use, currently supported types:
+ # HTTP-01 or TLS-ALPN-01
+ # See [docs/tls.md](docs/tls.md) for more information
+ tls_letsencrypt_challenge_type: HTTP-01
+ # When HTTP-01 challenge is chosen, letsencrypt must set up a
+ # verification endpoint, and it will be listening on:
+ # :http = port 80
+ tls_letsencrypt_listen: ":http"
+
+ ## Use already defined certificates:
+ tls_cert_path: ""
+ tls_key_path: ""
+
+ log:
+ # Output formatting for logs: text or json
+ format: text
+ level: info
+
+ # Path to a file containg ACL policies.
+ # ACLs can be defined as YAML or HUJSON.
+ # https://tailscale.com/kb/1018/acls/
+ acl_policy_path: "/headscale/acls/config.hujson" # TODO(gio): mount path must be configurable
+
+ ## DNS
+ #
+ # headscale supports Tailscale's DNS configuration and MagicDNS.
+ # Please have a look to their KB to better understand the concepts:
+ #
+ # - https://tailscale.com/kb/1054/dns/
+ # - https://tailscale.com/kb/1081/magicdns/
+ # - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
+ #
+ dns_config:
+ # Whether to prefer using Headscale provided DNS or use local.
+ override_local_dns: true
+
+ # List of DNS servers to expose to clients.
+ nameservers:
+ - 8.8.8.8
+ - 1.1.1.1
+
+ # NextDNS (see https://tailscale.com/kb/1218/nextdns/).
+ # "abc123" is example NextDNS ID, replace with yours.
+ #
+ # With metadata sharing:
+ # nameservers:
+ # - https://dns.nextdns.io/abc123
+ #
+ # Without metadata sharing:
+ # nameservers:
+ # - 2a07:a8c0::ab:c123
+ # - 2a07:a8c1::ab:c123
+
+ # Split DNS (see https://tailscale.com/kb/1054/dns/),
+ # list of search domains and the DNS to query for each one.
+ #
+ # restricted_nameservers:
+ # foo.bar.com:
+ # - 1.1.1.1
+ # darp.headscale.net:
+ # - 1.1.1.1
+ # - 8.8.8.8
+
+ # Search domains to inject.
+ domains: []
+
+ # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
+ # Only works if there is at least a nameserver defined.
+ magic_dns: true
+
+ # Defines the base domain to create the hostnames for MagicDNS.
+ # `base_domain` must be a FQDNs, without the trailing dot.
+ # The FQDN of the hosts will be
+ # `hostname.namespace.base_domain` (e.g., _myhost.mynamespace.example.com_).
+        base_domain: "{{ .Values.publicBaseDomain }}"
+
+ # Unix socket used for the CLI to connect without authentication
+ # Note: for production you will want to set this to something like:
+ # unix_socket: /var/run/headscale.sock
+ unix_socket: /headscale-api/headscale.sock
+ unix_socket_permission: "0770"
+ #
+ # headscale supports experimental OpenID connect support,
+ # it is still being tested and might have some bugs, please
+ # help us test it.
+ # OpenID Connect
+ oidc:
+ only_start_if_oidc_is_available: true
+        issuer: "{{ .Values.oauth2.issuer }}"
+        client_id: {{`{{ .client_id }}`}}
+        client_secret: {{`{{ .client_secret }}`}}
+        scope: ["openid", "profile", "email"]
+        extra_params:
+          domain_hint: "{{ .Values.domain }}"
+        allowed_domains:
+          - "{{ .Values.publicBaseDomain }}"
+ # allowed_groups:
+ # - /headscale
+ # allowed_users:
+ # - alice@example.com
+ strip_email_domain: true
+
+ # Logtail configuration
+ # Logtail is Tailscales logging and auditing infrastructure, it allows the control panel
+ # to instruct tailscale nodes to log their activity to a remote server.
+ logtail:
+ # Enable logtail for this headscales clients.
+ # As there is currently no support for overriding the log server in headscale, this is
+ # disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
+ enabled: false
+
+ # Enabling this option makes devices prefer a random port for WireGuard traffic over the
+ # default static port 41641. This option is intended as a workaround for some buggy
+ # firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
+ randomize_client_port: true
diff --git a/charts/headscale/templates/headscale.yaml b/charts/headscale/templates/headscale.yaml
new file mode 100644
index 0000000..d483854
--- /dev/null
+++ b/charts/headscale/templates/headscale.yaml
@@ -0,0 +1,202 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ acme.cert-manager.io/http01-edit-in-place: "true"
+    cert-manager.io/cluster-issuer: {{ .Values.certificateIssuer }}
+ {{ if .Values.ui.enabled }}
+ nginx.org/rewrites: "serviceName=headscale rewrite=/;serviceName=headscale-ui rewrite=/"
+ {{ end }}
+spec:
+ ingressClassName: {{ .Values.ingressClassName }}
+ tls:
+ - hosts:
+ - {{ .Values.domain }}
+ secretName: cert-{{ .Values.domain }}
+ rules:
+ - host: {{ .Values.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: headscale
+ port:
+ name: http
+ {{ if .Values.ui.enabled }}
+ - path: /web
+ pathType: Prefix
+ backend:
+ service:
+ name: headscale-ui
+ port:
+ name: http
+ {{ end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: headscale
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ metallb.universe.tf/address-pool: {{ .Values.ipAddressPool }}
+spec:
+ type: LoadBalancer
+ selector:
+ app: headscale
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+{{ if .Values.ui.enabled }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: headscale-ui
+ namespace: {{ .Release.Namespace }}
+ # annotations:
+ # metallb.universe.tf/address-pool: {{ .Values.ipAddressPool }}
+spec:
+ type: ClusterIP
+ selector:
+ app: headscale
+ ports:
+ - name: http
+ port: 80
+ targetPort: http-ui
+ protocol: TCP
+{{ end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: headscale-api
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: headscale
+ ports:
+ - name: http
+ port: 80
+ targetPort: http-api
+ protocol: TCP
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: data
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.size }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: acls
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi # TODO(gio): configurable
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: headscale
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: headscale
+ serviceName: headscale
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: headscale
+ spec:
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ - name: acls
+ persistentVolumeClaim:
+ claimName: acls
+ - name: config
+ configMap:
+ name: config
+ - name: api-socket
+ emptyDir: {}
+ containers:
+ - name: headscale
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ - name: grpc
+ containerPort: 50443
+ protocol: TCP
+ command:
+ - headscale
+ - --config=/headscale/config/config.yaml
+ - serve
+ volumeMounts:
+ - name: data
+ mountPath: /headscale/data
+ readOnly: false
+ - name: config
+ mountPath: /headscale/config
+ readOnly: true
+ - name: acls
+ mountPath: /headscale/acls
+ readOnly: true
+ - mountPath: /headscale-api
+ name: api-socket
+ - name: headscale-api
+ image: {{ .Values.api.image.repository }}:{{ .Values.api.image.tag }}
+ imagePullPolicy: {{ .Values.api.image.pullPolicy }}
+ ports:
+ - name: http-api
+ containerPort: {{ .Values.api.port }}
+ protocol: TCP
+ command:
+ - headscale-api
+ - --port={{ .Values.api.port }}
+ - --config=/headscale/config/config.yaml
+ - --ip-subnet={{ .Values.api.ipSubnet }}
+ - --acls=/headscale/acls/config.hujson
+ volumeMounts:
+ - name: data
+ mountPath: /headscale/data
+ readOnly: false
+ - name: config
+ mountPath: /headscale/config
+ readOnly: true
+ - name: acls
+ mountPath: /headscale/acls
+ readOnly: false
+ - mountPath: /headscale-api
+ name: api-socket
+ {{ if .Values.ui.enabled }}
+ - name: headscale-ui # TODO(gio): separate deployment
+ image: {{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag }}
+ imagePullPolicy: {{ .Values.ui.image.pullPolicy }}
+ ports:
+ - name: http-ui
+ containerPort: 80
+ protocol: TCP
+ {{ end }}
diff --git a/charts/headscale/values.yaml b/charts/headscale/values.yaml
new file mode 100644
index 0000000..18d7b57
--- /dev/null
+++ b/charts/headscale/values.yaml
@@ -0,0 +1,27 @@
+image:
+ repository: headscale/headscale
+ tag: 0.22.3
+ pullPolicy: IfNotPresent
+storage:
+ size: 5Gi
+ingressClassName: pcloud-ingress-public
+certificateIssuer: lekva-public
+domain: headscale.example.com
+publicBaseDomain: example.com
+ipAddressPool: example-headscale
+oauth2:
+ secretName: oauth2-client
+ issuer: https://oidc-issuer.example.com
+api:
+ port: 8585
+ ipSubnet: 10.1.0.0/24
+ image:
+ repository: giolekva/headscale-api
+ tag: latest
+ pullPolicy: Always
+ui:
+ enabled: false
+ image:
+ repository: ghcr.io/gurucomputing/headscale-ui
+ tag: latest
+ pullPolicy: Always
diff --git a/charts/hydra-maester/.helmignore b/charts/hydra-maester/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/hydra-maester/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/hydra-maester/Chart.yaml b/charts/hydra-maester/Chart.yaml
new file mode 100644
index 0000000..81233c5
--- /dev/null
+++ b/charts/hydra-maester/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v2
+appVersion: v0.0.23
+description: A Helm chart for Kubernetes
+icon: https://raw.githubusercontent.com/ory/docs/master/docs/static/img/logo-hydra.svg
+name: hydra-maester
+type: application
+version: 0.33.1
diff --git a/charts/hydra-maester/README.md b/charts/hydra-maester/README.md
new file mode 100644
index 0000000..838ca01
--- /dev/null
+++ b/charts/hydra-maester/README.md
@@ -0,0 +1,44 @@
+# hydra-maester
+
+  
+
+A Helm chart for Kubernetes
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| adminService | object | `{"endpoint":"/admin/clients","name":null,"port":null}` | Connection data to admin service of Hydra |
+| adminService.endpoint | string | `"/admin/clients"` | Set the clients endpoint, should be `/clients` for Hydra 1.x and `/admin/clients` for Hydra 2.x |
+| adminService.name | string | `nil` | Service name |
+| adminService.port | string | `nil` | Service port |
+| affinity | object | `{}` | Configure node affinity |
+| deployment.args | object | `{"syncPeriod":""}` | Arguments to be passed to the program |
+| deployment.args.syncPeriod | string | `""` | The minimum frequency at which watched resources are reconciled |
+| deployment.automountServiceAccountToken | bool | `true` | This applications connects to the k8s API and requires the permissions |
+| deployment.dnsConfig | object | `{}` | Configure pod dnsConfig. |
+| deployment.extraAnnotations | object | `{}` | Deployment level extra annotations |
+| deployment.extraLabels | object | `{}` | Deployment level extra labels |
+| deployment.nodeSelector | object | `{}` | Node labels for pod assignment. |
+| deployment.podMetadata | object | `{"annotations":{},"labels":{}}` | Specify pod metadata, this metadata is added directly to the pod, and not higher objects |
+| deployment.podMetadata.annotations | object | `{}` | Extra pod level annotations |
+| deployment.podMetadata.labels | object | `{}` | Extra pod level labels |
+| deployment.resources | object | `{}` | |
+| deployment.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}}` | Default security context |
+| deployment.serviceAccount | object | `{"annotations":{}}` | Configure service account |
+| deployment.serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
+| deployment.tolerations | list | `[]` | Configure node tolerations. |
+| deployment.topologySpreadConstraints | list | `[]` | Configure pod topologySpreadConstraints. |
+| enabledNamespaces | list | `[]` | The Controller have CREATE and READ access to all Secrets in the namespaces listed below. |
+| forwardedProto | string | `nil` | |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
+| image.repository | string | `"oryd/hydra-maester"` | Ory Hydra-maester image |
+| image.tag | string | `"v0.0.27"` | Ory Hydra-maester version |
+| imagePullSecrets | list | `[]` | Image pull secrets |
+| pdb | object | `{"enabled":false,"spec":{"minAvailable":1}}` | PodDistributionBudget configuration |
+| priorityClassName | string | `""` | Pod priority # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ |
+| replicaCount | int | `1` | Number of replicas in deployment |
+| singleNamespaceMode | bool | `false` | Single namespace mode. If enabled the controller will watch for resources only from namespace it is deployed in, ignoring others |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
diff --git a/charts/hydra-maester/crds/crd-oauth2clients.yaml b/charts/hydra-maester/crds/crd-oauth2clients.yaml
new file mode 100644
index 0000000..ebc9ebb
--- /dev/null
+++ b/charts/hydra-maester/crds/crd-oauth2clients.yaml
@@ -0,0 +1,253 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.0
+ creationTimestamp: null
+ name: oauth2clients.hydra.ory.sh
+spec:
+ group: hydra.ory.sh
+ names:
+ kind: OAuth2Client
+ listKind: OAuth2ClientList
+ plural: oauth2clients
+ singular: oauth2client
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: OAuth2Client is the Schema for the oauth2clients API
+ properties:
+ apiVersion:
+ description:
+ "APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the
+ latest internal value, and may reject unrecognized values. More
+ info:
+ https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ type: string
+ kind:
+ description:
+ "Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the
+ client submits requests to. Cannot be updated. In CamelCase.
+ More info:
+ https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ type: string
+ metadata:
+ type: object
+ spec:
+ description:
+ OAuth2ClientSpec defines the desired state of OAuth2Client
+ properties:
+ allowedCorsOrigins:
+ description:
+ AllowedCorsOrigins is an array of allowed CORS origins
+ items:
+ description:
+ RedirectURI represents a redirect URI for the client
+ pattern: \w+:/?/?[^\s]+
+ type: string
+ type: array
+ audience:
+ description:
+ Audience is a whitelist defining the audiences this client
+ is allowed to request tokens for
+ items:
+ type: string
+ type: array
+ clientName:
+ description:
+ ClientName is the human-readable string name of the client
+ to be presented to the end-user during authorization.
+ type: string
+ grantTypes:
+ description:
+ GrantTypes is an array of grant types the client is allowed
+ to use.
+ items:
+ description: GrantType represents an OAuth 2.0 grant type
+ enum:
+ - client_credentials
+ - authorization_code
+ - implicit
+ - refresh_token
+ type: string
+ maxItems: 4
+ minItems: 1
+ type: array
+ hydraAdmin:
+ description:
+ HydraAdmin is the optional configuration to use for managing
+ this client
+ properties:
+ endpoint:
+ description:
+ Endpoint is the endpoint for the hydra instance on which
+ to set up the client. This value will override the value
+ provided to `--endpoint` (defaults to `"/clients"` in
+ the application)
+ pattern: (^$|^/.*)
+ type: string
+ forwardedProto:
+ description:
+ ForwardedProto overrides the `--forwarded-proto` flag.
+ The value "off" will force this to be off even if
+ `--forwarded-proto` is specified
+ pattern: (^$|https?|off)
+ type: string
+ port:
+ description:
+ Port is the port for the hydra instance on which to set
+ up the client. This value will override the value
+ provided to `--hydra-port`
+ maximum: 65535
+ type: integer
+ url:
+ description:
+ URL is the URL for the hydra instance on which to set up
+ the client. This value will override the value provided
+ to `--hydra-url`
+ maxLength: 64
+ pattern: (^$|^https?://.*)
+ type: string
+ type: object
+ metadata:
+ description: Metadata is abritrary data
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ postLogoutRedirectUris:
+ description:
+ PostLogoutRedirectURIs is an array of the post logout
+ redirect URIs allowed for the application
+ items:
+ description:
+ RedirectURI represents a redirect URI for the client
+ pattern: \w+:/?/?[^\s]+
+ type: string
+ type: array
+ redirectUris:
+ description:
+ RedirectURIs is an array of the redirect URIs allowed for
+ the application
+ items:
+ description:
+ RedirectURI represents a redirect URI for the client
+ pattern: \w+:/?/?[^\s]+
+ type: string
+ type: array
+ responseTypes:
+ description:
+ ResponseTypes is an array of the OAuth 2.0 response type
+ strings that the client can use at the authorization
+ endpoint.
+ items:
+ description:
+ ResponseType represents an OAuth 2.0 response type strings
+ enum:
+ - id_token
+ - code
+ - token
+ - code token
+ - code id_token
+ - id_token token
+ - code id_token token
+ type: string
+ maxItems: 3
+ minItems: 1
+ type: array
+ scope:
+ description:
+ Scope is a string containing a space-separated list of scope
+ values (as described in Section 3.3 of OAuth 2.0 [RFC6749])
+ that the client can use when requesting access tokens.
+ pattern: ([a-zA-Z0-9\.\*]+\s?)+
+ type: string
+ secretName:
+ description:
+ SecretName points to the K8s secret that contains this
+ client's ID and password
+ maxLength: 253
+ minLength: 1
+ pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*'
+ type: string
+ tokenEndpointAuthMethod:
+ allOf:
+ - enum:
+ - client_secret_basic
+ - client_secret_post
+ - private_key_jwt
+ - none
+ - enum:
+ - client_secret_basic
+ - client_secret_post
+ - private_key_jwt
+ - none
+ description:
+ Indication which authentication method shoud be used for the
+ token endpoint
+ type: string
+ required:
+ - grantTypes
+ - scope
+ - secretName
+ type: object
+ status:
+ description:
+ OAuth2ClientStatus defines the observed state of OAuth2Client
+ properties:
+ conditions:
+ items:
+ description:
+ OAuth2ClientCondition contains condition information for
+ an OAuth2Client
+ properties:
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ observedGeneration:
+ description:
+ ObservedGeneration represents the most recent generation
+ observed by the daemon set controller.
+ format: int64
+ type: integer
+ reconciliationError:
+ description:
+ ReconciliationError represents an error that occurred during
+ the reconciliation process
+ properties:
+ description:
+ description:
+ Description is the description of the reconciliation
+ error
+ type: string
+ statusCode:
+ description:
+ Code is the status code of the reconciliation error
+ type: string
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/hydra-maester/templates/_helpers.tpl b/charts/hydra-maester/templates/_helpers.tpl
new file mode 100644
index 0000000..b607c67
--- /dev/null
+++ b/charts/hydra-maester/templates/_helpers.tpl
@@ -0,0 +1,59 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "hydra-maester.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "hydra-maester.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "hydra-maester.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "hydra-maester.labels" -}}
+app.kubernetes.io/name: {{ include "hydra-maester.name" . }}
+helm.sh/chart: {{ include "hydra-maester.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+
+{{/*
+Get Hydra admin service name
+*/}}
+{{- define "hydra-maester.adminService" -}}
+{{- if .Values.hydraFullnameOverride -}}
+{{- printf "%s-admin" .Values.hydraFullnameOverride -}}
+{{- else if contains "hydra" .Release.Name -}}
+{{- printf "%s-admin" .Release.Name -}}
+{{- else -}}
+{{- printf "%s-%s-admin" .Release.Name "hydra" -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/hydra-maester/templates/deployment.yaml b/charts/hydra-maester/templates/deployment.yaml
new file mode 100644
index 0000000..ed9c3b8
--- /dev/null
+++ b/charts/hydra-maester/templates/deployment.yaml
@@ -0,0 +1,97 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "hydra-maester.fullname" . }}
+ labels:
+ {{- include "hydra-maester.labels" . | nindent 4 }}
+ {{- with .Values.deployment.extraLabels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ app.kubernetes.io/name: {{ include "hydra-maester.fullname" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ control-plane: controller-manager
+ app.kubernetes.io/name: {{ include "hydra-maester.fullname" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- with .Values.deployment.extraLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.deployment.podMetadata.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- with .Values.deployment.extraAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.deployment.podMetadata.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - /manager
+ args:
+ - --metrics-addr=127.0.0.1:8080
+ - --hydra-url=http://{{ .Values.adminService.name | default ( include "hydra-maester.adminService" . ) }}
+ - --hydra-port={{ .Values.adminService.port | default 4445 }}
+ {{- with .Values.adminService.endpoint }}
+ - --endpoint={{ . }}
+ {{- end }}
+ {{- if .Values.forwardedProto }}
+ - --forwarded-proto={{ .Values.forwardedProto }}
+ {{- end }}
+ {{- if .Values.singleNamespaceMode }}
+ - --namespace={{ .Release.Namespace }}
+ {{- end }}
+ {{- if .Values.deployment.args.syncPeriod }}
+ - --sync-period={{ .Values.deployment.args.syncPeriod }}
+ {{- end }}
+ resources:
+ {{- toYaml .Values.deployment.resources | nindent 12 }}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ {{- if .Values.deployment.securityContext }}
+ securityContext:
+ {{- toYaml .Values.deployment.securityContext | nindent 12 }}
+ {{- end }}
+ serviceAccountName: {{ include "hydra-maester.fullname" . }}-account
+ automountServiceAccountToken: {{ .Values.deployment.automountServiceAccountToken }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+ {{- end }}
+      {{- with .Values.deployment.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.deployment.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.deployment.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.deployment.dnsConfig }}
+ dnsConfig:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/charts/hydra-maester/templates/pdb.yaml b/charts/hydra-maester/templates/pdb.yaml
new file mode 100644
index 0000000..1be53ba
--- /dev/null
+++ b/charts/hydra-maester/templates/pdb.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.pdb.enabled -}}
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "hydra-maester.fullname" . }}
+spec:
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ app.kubernetes.io/name: {{ include "hydra-maester.fullname" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+  {{- toYaml .Values.pdb.spec | nindent 2 }}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/hydra-maester/templates/rbac.yaml b/charts/hydra-maester/templates/rbac.yaml
new file mode 100644
index 0000000..e67cc62
--- /dev/null
+++ b/charts/hydra-maester/templates/rbac.yaml
@@ -0,0 +1,95 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "hydra-maester.fullname" . }}-account
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "hydra-maester.labels" . | nindent 4 }}
+ {{- with .Values.deployment.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- if not .Values.singleNamespaceMode }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "hydra-maester.fullname" . }}-role
+rules:
+ - apiGroups: ["hydra.ory.sh"]
+ resources: ["oauth2clients", "oauth2clients/status"]
+ verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["list", "watch", "create"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "hydra-maester.fullname" . }}-role-binding
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "hydra-maester.fullname" . }}-account # Service account assigned to the controller pod.
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "hydra-maester.fullname" . }}-role
+{{- end }}
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "hydra-maester.fullname" . }}-role
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch", "create"]
+ - apiGroups: ["hydra.ory.sh"]
+ resources: ["oauth2clients", "oauth2clients/status"]
+ verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "hydra-maester.fullname" . }}-role-binding
+ namespace: {{ .Release.Namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "hydra-maester.fullname" . }}-account # Service account assigned to the controller pod.
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "hydra-maester.fullname" . }}-role
+
+{{- $name := include "hydra-maester.fullname" . -}}
+{{- $namespace := .Release.Namespace -}}
+{{- range .Values.enabledNamespaces }}
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ $name }}-role
+ namespace: {{ . }}
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch", "create", "update"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ $name }}-role-binding
+ namespace: {{ . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ $name }}-account # Service account assigned to the controller pod.
+ namespace: {{ $namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ $name }}-role
+{{- end }}
diff --git a/charts/hydra-maester/values.yaml b/charts/hydra-maester/values.yaml
new file mode 100644
index 0000000..caa6a90
--- /dev/null
+++ b/charts/hydra-maester/values.yaml
@@ -0,0 +1,122 @@
+# -- Number of replicas in deployment
+replicaCount: 1
+
+# -- The Controller has CREATE and READ access to all Secrets in the namespaces listed below.
+enabledNamespaces: []
+
+# -- Single namespace mode. If enabled the controller will watch for resources only from namespace it is deployed in, ignoring others
+singleNamespaceMode: false
+
+image:
+ # -- Ory Hydra-maester image
+ repository: oryd/hydra-maester
+ # -- Ory Hydra-maester version
+ tag: v0.0.27
+ # -- Image pull policy
+ pullPolicy: IfNotPresent
+
+# -- Image pull secrets
+imagePullSecrets: []
+
+# -- Pod priority
+## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+priorityClassName: ""
+
+# -- Connection data to admin service of Hydra
+adminService:
+ # -- Service name
+ name:
+ # -- Service port
+ port:
+ # -- Set the clients endpoint, should be `/clients` for Hydra 1.x and
+ # `/admin/clients` for Hydra 2.x
+ endpoint: /admin/clients
+
+forwardedProto:
+
+deployment:
+ resources:
+ {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 30Mi
+ # requests:
+ # cpu: 100m
+ # memory: 20Mi
+
+ # -- Default security context
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ seccompProfile:
+ type: RuntimeDefault
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ allowPrivilegeEscalation: false
+ privileged: false
+
+ # -- Node labels for pod assignment.
+ nodeSelector: {}
+ # If you do want to specify node labels, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'nodeSelector:'.
+ # foo: bar
+
+ # -- Configure node tolerations.
+ tolerations: []
+ # -- Deployment level extra annotations
+ extraAnnotations: {}
+ # -- Deployment level extra labels
+ extraLabels: {}
+
+ # -- Configure pod topologySpreadConstraints.
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app.kubernetes.io/name: hydra
+ # app.kubernetes.io/instance: hydra
+
+ # -- Configure pod dnsConfig.
+ dnsConfig: {}
+ # options:
+ # - name: "ndots"
+ # value: "1"
+
+ # -- Specify pod metadata, this metadata is added directly to the pod, and not higher objects
+ podMetadata:
+ # -- Extra pod level labels
+ labels: {}
+ # -- Extra pod level annotations
+ annotations: {}
+
+ # https://github.com/kubernetes/kubernetes/issues/57601
+ # -- This application connects to the k8s API and requires the permissions
+ automountServiceAccountToken: true
+
+ # -- Arguments to be passed to the program
+ args:
+ # -- The minimum frequency at which watched resources are reconciled
+ syncPeriod: ""
+ # syncPeriod: 10h
+
+ # -- Configure service account
+ serviceAccount:
+ # -- Annotations to add to the service account
+ annotations: {}
+
+# -- Configure node affinity
+affinity: {}
+
+# -- PodDisruptionBudget configuration
+pdb:
+ enabled: false
+ spec:
+ minAvailable: 1
diff --git a/charts/ingress-nginx/.helmignore b/charts/ingress-nginx/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/ingress-nginx/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/ingress-nginx/CHANGELOG.md b/charts/ingress-nginx/CHANGELOG.md
new file mode 100644
index 0000000..7d81ac1
--- /dev/null
+++ b/charts/ingress-nginx/CHANGELOG.md
@@ -0,0 +1,460 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.4.0
+
+* Adding support for disabling liveness and readiness probes to the Helm chart by @njegosrailic in https://github.com/kubernetes/ingress-nginx/pull/9238
+* add:(admission-webhooks) ability to set securityContext by @ybelMekk in https://github.com/kubernetes/ingress-nginx/pull/9186
+* #7652 - Updated Helm chart to use the fullname for the electionID if not specified. by @FutureMatt in https://github.com/kubernetes/ingress-nginx/pull/9133
+* Rename controller-wehbooks-networkpolicy.yaml. by @Gacko in https://github.com/kubernetes/ingress-nginx/pull/9123
+
+### 4.3.0
+- Support for Kubernetes v.1.25.0 was added and support for endpoint slices
+- Support for Kubernetes v1.20.0 and v1.21.0 was removed
+- [8890](https://github.com/kubernetes/ingress-nginx/pull/8890) migrate to endpointslices
+- [9059](https://github.com/kubernetes/ingress-nginx/pull/9059) kubewebhookcertgen sha change after go1191
+- [9046](https://github.com/kubernetes/ingress-nginx/pull/9046) Parameterize metrics port name
+- [9104](https://github.com/kubernetes/ingress-nginx/pull/9104) Fix yaml formatting error with multiple annotations
+
+### 4.2.1
+
+- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1
+- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today"
+
+### 4.2.0
+
+- Support for Kubernetes v1.19.0 was removed
+- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0"
+- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name"
+- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1"
+- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16"
+- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process"
+- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix"
+- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check"
+- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16"
+- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0"
+- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process"
+- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable"
+- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15"
+- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2"
+- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format"
+- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API"
+- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide"
+- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs"
+- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14"
+- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0"
+- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5"
+- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement"
+- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver"
+- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step"
+- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha"
+- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path"
+- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases"
+- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds"
+- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps"
+- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)"
+- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard"
+- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0"
+- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2"
+- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos"
+- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it"
+- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0"
+- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0"
+- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3"
+- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1"
+
+### 4.1.2
+
+- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed"
+- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter"
+- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart"
+
+### 4.1.0
+
+- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script"
+- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6"
+- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod"
+- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty"
+- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector"
+- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies"
+- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md"
+- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing"
+- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist"
+- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one"
+- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement"
+- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation"
+- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0"
+
+### 4.0.18
+
+- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build"
+- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build"
+- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge"
+- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241"
+- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts"
+- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error"
+- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation"
+- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric"
+- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code."
+- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image"
+- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests"
+- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint"
+- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1"
+- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0"
+- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint"
+- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial"
+- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera"
+- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment"
+- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation"
+- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell"
+- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor"
+- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations"
+- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0"
+- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account"
+- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description"
+- "[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests"
+- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values"
+- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1"
+- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs"
+- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits"
+- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations"
+- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT"
+
+
+### 4.0.15
+
+- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1
+- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6
+- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs
+- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors
+- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release
+- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparison of P…
+- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch
+- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable
+- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart
+- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0
+- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump google.golang.org/grpc from 1.41.0 to 1.43.0
+- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation
+- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045)
+- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues
+- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543
+- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executable name
+- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners
+- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option
+- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags
+- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified
+- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page
+- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation
+- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations
+- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs
+- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files
+- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml
+- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide
+- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile
+- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use k8s-staging-test-infra/gcb-docker-gcloud
+- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement
+- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation.
+- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs
+- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to default server
+- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog
+- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition
+
+### 4.0.14
+
+- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md
+
+### 4.0.13
+
+- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml
+
+### 4.0.12
+
+- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs
+
+### 4.0.11
+
+- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional.
+
+### 4.0.10
+
+- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0
+
+### 4.0.9
+
+- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources
+
+### 4.0.7
+
+- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx
+- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service
+
+### 4.0.6
+
+- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx
+- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart
+- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode
+- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1
+
+### 4.0.5
+
+- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx
+
+### 4.0.3
+
+- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx
+
+### 4.0.2
+
+- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx
+
+### 4.0.1
+
+- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx
+
+### 3.34.0
+
+- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates
+
+### 3.33.0
+
+- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1
+
+### 3.32.0
+
+- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA
+
+### 3.31.0
+
+- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes
+
+### 3.30.0
+
+- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints
+
+### 3.29.0
+
+- [X] [#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor
+
+### 3.28.0
+
+- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs
+
+### 3.27.0
+
+- Update ingress-nginx v0.45.0
+
+### 3.26.0
+
+- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics
+
+### 3.25.0
+
+- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken
+
+### 3.24.0
+
+- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment
+
+### 3.23.0
+
+- Update ingress-nginx v0.44.0
+
+### 3.22.0
+
+- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file
+- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart
+
+### 3.21.0
+
+- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject
+- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values
+- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled
+- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1
+
+### 3.20.1
+
+- Do not create KEDA in case of DaemonSets.
+- Fix KEDA v2 definition
+
+### 3.20.0
+
+- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled.
+
+### 3.19.0
+
+- Update ingress-nginx v0.43.0
+
+### 3.18.0
+
+- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy
+- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters
+
+### 3.17.0
+
+- Update ingress-nginx v0.42.0
+
+### 3.16.1
+
+- Fix chart-releaser action
+
+### 3.16.0
+
+- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service
+
+### 3.15.1
+
+- Fix chart-releaser action
+
+### 3.15.0
+
+- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml
+
+### 3.14.0
+
+- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend
+
+### 3.13.0
+
+- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable
+
+### 3.12.0
+
+- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs
+
+### 3.11.1
+
+- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling
+
+### 3.11.0
+
+- Support Keda Autoscaling
+
+### 3.10.1
+
+- Fix regression introduced in 0.41.0 with external authentication
+
+### 3.10.0
+
+- Fix routing regression introduced in 0.41.0 with PathType Exact
+
+### 3.9.0
+
+- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling
+
+### 3.8.0
+
+- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image
+- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs
+- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend
+- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix controller service annotations
+- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog
+
+### 3.7.1
+
+- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart
+
+### 3.7.0
+
+- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315)
+
+### 3.6.0
+
+- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector
+
+### 3.5.1
+
+- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release
+
+### 3.5.0
+
+- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations
+
+### 3.4.0
+
+- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288
+
+### 3.3.1
+
+- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart
+- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link
+- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0
+
+### 3.3.1
+
+- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test
+
+### 3.3.0
+
+- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values
+- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort
+- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression
+- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules
+
+### 3.0.0
+
+- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update chart requirements
+
+### 2.16.0
+
+- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller
+
+### 2.15.0
+
+- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec
+
+### 2.14.0
+
+- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration
+
+### 2.13.0
+
+- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0
+
+### 2.13.0
+
+- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0
+- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip
+
+### 2.12.1
+
+- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples
+
+### 2.12.0
+
+- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels
+- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting
+
+### 2.11.3
+
+- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH
+
+### 2.11.2
+
+- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version
+
+### 2.11.1
+
+- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1
+
+### 2.11.0
+
+- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0
+- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe
+
+### 2.10.0
+
+- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image
+
+### 2.9.1
+
+- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls because numeric values need to be presented as strings (#5823)
+
+### 2.9.0
+
+- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues
+
+
+### TODO
+
+Keep building the changelog using *git log charts* checking the tag
diff --git a/charts/ingress-nginx/Chart.yaml b/charts/ingress-nginx/Chart.yaml
new file mode 100644
index 0000000..b2e6c06
--- /dev/null
+++ b/charts/ingress-nginx/Chart.yaml
@@ -0,0 +1,24 @@
+annotations:
+ artifacthub.io/changes: |
+ - "helm: Fix opentelemetry module installation for daemonset (#9792)"
+ - "Update charts/* to keep project name display aligned (#9931)"
+ - "Update Ingress-Nginx version controller-v1.8.0"
+ artifacthub.io/prerelease: "false"
+apiVersion: v2
+appVersion: 1.8.0
+description: Ingress controller for Kubernetes using NGINX as a reverse proxy and
+ load balancer
+home: https://github.com/kubernetes/ingress-nginx
+icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png
+keywords:
+- ingress
+- nginx
+kubeVersion: '>=1.20.0-0'
+maintainers:
+- name: rikatz
+- name: strongjz
+- name: tao12345666333
+name: ingress-nginx
+sources:
+- https://github.com/kubernetes/ingress-nginx
+version: 4.7.0
diff --git a/charts/ingress-nginx/OWNERS b/charts/ingress-nginx/OWNERS
new file mode 100644
index 0000000..6b7e049
--- /dev/null
+++ b/charts/ingress-nginx/OWNERS
@@ -0,0 +1,10 @@
+# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
+
+approvers:
+- ingress-nginx-helm-maintainers
+
+reviewers:
+- ingress-nginx-helm-reviewers
+
+labels:
+- area/helm
diff --git a/charts/ingress-nginx/README.md b/charts/ingress-nginx/README.md
new file mode 100644
index 0000000..4a00815
--- /dev/null
+++ b/charts/ingress-nginx/README.md
@@ -0,0 +1,501 @@
+# ingress-nginx
+
+[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
+
+ 
+
+To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
+
+This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Requirements
+
+Kubernetes: `>=1.20.0-0`
+
+## Get Repo Info
+
+```console
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+```
+
+## Install Chart
+
+**Important:** only helm3 is supported
+
+```console
+helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
+```
+
+The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] [CHART] --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Migrating from stable/nginx-ingress
+
+There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart:
+
+1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
+1. For critical services in production that require zero-downtime, you will want to:
+ 1. [Install](#install-chart) a second Ingress controller
+ 1. Redirect your DNS traffic from the old controller to the new controller
+ 1. Log traffic from both controllers during this changeover
+ 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
+
+Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values ingress-nginx/ingress-nginx
+```
+
+### PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one,
+else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
+
+### Prometheus Metrics
+
+The Ingress-Nginx Controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
+Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`)
+
+### ingress-nginx nginx\_status page/stats server
+
+Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in Ingress-Nginx Controller:
+
+- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
+- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
+ You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server
+
+### ExternalDNS Service Configuration
+
+Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service:
+
+```yaml
+controller:
+ service:
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
+```
+
+### AWS L7 ELB with SSL Termination
+
+Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
+
+```yaml
+controller:
+ service:
+ targetPorts:
+ http: http
+ https: http
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
+ service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+ service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+ service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+```
+
+### Additional Internal Load Balancer
+
+This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
+
+By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
+
+You'll need to set both the following values:
+
+`controller.service.internal.enabled`
+`controller.service.internal.annotations`
+
+If one of them is missing, the internal load balancer will not be deployed. For example, you may have `controller.service.internal.enabled=true` but no annotations set; in this case, no action will be taken.
+
+`controller.service.internal.annotations` varies with the cloud service you're using.
+
+Example for AWS:
+
+```yaml
+controller:
+ service:
+ internal:
+ enabled: true
+ annotations:
+ # Create internal ELB
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+Example for GCE:
+
+```yaml
+controller:
+ service:
+ internal:
+ enabled: true
+ annotations:
+ # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
+ # For GKE versions 1.17 and later
+ networking.gke.io/load-balancer-type: "Internal"
+ # For earlier versions
+ # cloud.google.com/load-balancer-type: "Internal"
+
+ # Any other annotation can be declared here.
+```
+
+Example for Azure:
+
+```yaml
+controller:
+ service:
+ annotations:
+ # Create internal LB
+ service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+Example for Oracle Cloud Infrastructure:
+
+```yaml
+controller:
+ service:
+ annotations:
+ # Create internal LB
+ service.beta.kubernetes.io/oci-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object.
+
+Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
+
+### Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the Ingress-Nginx Controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
+**This feature is enabled by default since 0.31.0.**
+
+nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; version 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
+
+#### How the Chart Configures the Hooks
+A validating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
+
+1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
+2. The Ingress-Nginx Controller pod is configured to use a TLS proxy container, which will load that certificate.
+3. Validating and Mutating webhook configurations are created in the cluster.
+4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations
+
+#### Alternatives
+It should be possible to use [cert-manager/cert-manager](https://github.com/cert-manager/cert-manager) if a more complete solution is required.
+
+You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `controller.admissionWebhooks.certManager.enabled` value to true.
+
+Please ensure that cert-manager is correctly installed and configured.
+
+### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
+
+```console
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| commonLabels | object | `{}` | |
+| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers |
+| controller.admissionWebhooks.annotations | object | `{}` | |
+| controller.admissionWebhooks.certManager.admissionCert.duration | string | `""` | |
+| controller.admissionWebhooks.certManager.enabled | bool | `false` | |
+| controller.admissionWebhooks.certManager.rootCert.duration | string | `""` | |
+| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | |
+| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | |
+| controller.admissionWebhooks.createSecretJob.securityContext.allowPrivilegeEscalation | bool | `false` | |
+| controller.admissionWebhooks.enabled | bool | `true` | |
+| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one |
+| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set |
+| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use |
+| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | |
+| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks |
+| controller.admissionWebhooks.namespaceSelector | object | `{}` | |
+| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | |
+| controller.admissionWebhooks.objectSelector | object | `{}` | |
+| controller.admissionWebhooks.patch.enabled | bool | `true` | |
+| controller.admissionWebhooks.patch.image.digest | string | `"sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b"` | |
+| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | |
+| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | |
+| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | |
+| controller.admissionWebhooks.patch.image.tag | string | `"v20230407"` | |
+| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources |
+| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | |
+| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | |
+| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # |
+| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | |
+| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | |
+| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | |
+| controller.admissionWebhooks.patch.tolerations | list | `[]` | |
+| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | |
+| controller.admissionWebhooks.patchWebhookJob.securityContext.allowPrivilegeEscalation | bool | `false` | |
+| controller.admissionWebhooks.port | int | `8443` | |
+| controller.admissionWebhooks.service.annotations | object | `{}` | |
+| controller.admissionWebhooks.service.externalIPs | list | `[]` | |
+| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | |
+| controller.admissionWebhooks.service.servicePort | int | `443` | |
+| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | |
+| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # |
+| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected |
+| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # |
+| controller.autoscaling.annotations | object | `{}` | |
+| controller.autoscaling.behavior | object | `{}` | |
+| controller.autoscaling.enabled | bool | `false` | |
+| controller.autoscaling.maxReplicas | int | `11` | |
+| controller.autoscaling.minReplicas | int | `1` | |
+| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | |
+| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | |
+| controller.autoscalingTemplate | list | `[]` | |
+| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ |
+| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. |
+| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) |
+| controller.containerName | string | `"controller"` | Configures the controller container name |
+| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on |
+| controller.customTemplate.configMapKey | string | `""` | |
+| controller.customTemplate.configMapName | string | `""` | |
+| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. |
+| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. |
+| controller.electionID | string | `""` | Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader' |
+| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # |
+| controller.enableTopologyAwareRouting | bool | `false` | This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-aware-hints="auto" Defaults to false |
+| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one |
+| controller.extraArgs | object | `{}` | Additional command line arguments to pass to Ingress-Nginx Controller E.g. to specify the default SSL certificate you can use |
+| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. |
+| controller.extraEnvs | list | `[]` | Additional environment variables to set |
+| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. |
+| controller.extraModules | list | `[]` | Modules, which are mounted into the core nginx image. See values.yaml for a sample to add opentelemetry module |
+| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. |
+| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. |
+| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode. |
+| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. |
+| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged |
+| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not |
+| controller.hostPort.ports.http | int | `80` | 'hostPort' http port |
+| controller.hostPort.ports.https | int | `443` | 'hostPort' https port |
+| controller.hostname | object | `{}` | Optionally customize the pod hostname. |
+| controller.image.allowPrivilegeEscalation | bool | `true` | |
+| controller.image.chroot | bool | `false` | |
+| controller.image.digest | string | `"sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3"` | |
+| controller.image.digestChroot | string | `"sha256:a45e41cd2b7670adf829759878f512d4208d0aec1869dae593a0fecd09a5e49e"` | |
+| controller.image.image | string | `"ingress-nginx/controller"` | |
+| controller.image.pullPolicy | string | `"IfNotPresent"` | |
+| controller.image.registry | string | `"registry.k8s.io"` | |
+| controller.image.runAsUser | int | `101` | |
+| controller.image.tag | string | `"v1.8.0"` | |
+| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation |
+| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). |
+| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass |
+| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster |
+| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not |
+| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass |
+| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. |
+| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | |
+| controller.keda.behavior | object | `{}` | |
+| controller.keda.cooldownPeriod | int | `300` | |
+| controller.keda.enabled | bool | `false` | |
+| controller.keda.maxReplicas | int | `11` | |
+| controller.keda.minReplicas | int | `1` | |
+| controller.keda.pollingInterval | int | `30` | |
+| controller.keda.restoreToOriginalReplicaCount | bool | `false` | |
+| controller.keda.scaledObject.annotations | object | `{}` | |
+| controller.keda.triggers | list | `[]` | |
+| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` |
+| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # |
+| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # |
+| controller.livenessProbe.failureThreshold | int | `5` | |
+| controller.livenessProbe.httpGet.path | string | `"/healthz"` | |
+| controller.livenessProbe.httpGet.port | int | `10254` | |
+| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | |
+| controller.livenessProbe.initialDelaySeconds | int | `10` | |
+| controller.livenessProbe.periodSeconds | int | `10` | |
+| controller.livenessProbe.successThreshold | int | `1` | |
+| controller.livenessProbe.timeoutSeconds | int | `1` | |
+| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases |
+| controller.metrics.enabled | bool | `false` | |
+| controller.metrics.port | int | `10254` | |
+| controller.metrics.portName | string | `"metrics"` | |
+| controller.metrics.prometheusRule.additionalLabels | object | `{}` | |
+| controller.metrics.prometheusRule.enabled | bool | `false` | |
+| controller.metrics.prometheusRule.rules | list | `[]` | |
+| controller.metrics.service.annotations | object | `{}` | |
+| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
+| controller.metrics.service.labels | object | `{}` | Labels to be added to the metrics service resource |
+| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | |
+| controller.metrics.service.servicePort | int | `10254` | |
+| controller.metrics.service.type | string | `"ClusterIP"` | |
+| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | |
+| controller.metrics.serviceMonitor.enabled | bool | `false` | |
+| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | |
+| controller.metrics.serviceMonitor.namespace | string | `""` | |
+| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | |
+| controller.metrics.serviceMonitor.relabelings | list | `[]` | |
+| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | |
+| controller.metrics.serviceMonitor.targetLabels | list | `[]` | |
+| controller.minAvailable | int | `1` | Minimum available pods set in PodDisruptionBudget. Define either 'minAvailable' or 'maxUnavailable', never both. |
+| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # |
+| controller.name | string | `"controller"` | |
+| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # |
+| controller.opentelemetry.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | |
+| controller.opentelemetry.enabled | bool | `false` | |
+| controller.opentelemetry.image | string | `"registry.k8s.io/ingress-nginx/opentelemetry:v20230527@sha256:fd7ec835f31b7b37187238eb4fdad4438806e69f413a203796263131f4f02ed0"` | |
+| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # |
+| controller.podLabels | object | `{}` | Labels to add to the pod container metadata |
+| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods |
+| controller.priorityClassName | string | `""` | |
+| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers |
+| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disabled, the status field reports the IP address of the node or nodes where an ingress controller pod is running. |
+| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not |
+| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be <namespace>/<service_name> |
+| controller.readinessProbe.failureThreshold | int | `3` | |
+| controller.readinessProbe.httpGet.path | string | `"/healthz"` | |
+| controller.readinessProbe.httpGet.port | int | `10254` | |
+| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | |
+| controller.readinessProbe.initialDelaySeconds | int | `10` | |
+| controller.readinessProbe.periodSeconds | int | `10` | |
+| controller.readinessProbe.successThreshold | int | `1` | |
+| controller.readinessProbe.timeoutSeconds | int | `1` | |
+| controller.replicaCount | int | `1` | |
+| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply |
+| controller.resources.requests.cpu | string | `"100m"` | |
+| controller.resources.requests.memory | string | `"90Mi"` | |
+| controller.scope.enabled | bool | `false` | Enable 'scope' or not |
+| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) |
+| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watch only the namespaces whose labels match namespaceSelector. Format like foo=bar. Defaults to empty, meaning all namespaces are watched. |
+| controller.service.annotations | object | `{}` | |
+| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # |
+| controller.service.enableHttp | bool | `true` | |
+| controller.service.enableHttps | bool | `true` | |
+| controller.service.enabled | bool | `true` | |
+| controller.service.external.enabled | bool | `true` | |
+| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
+| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. |
+| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). |
+| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. |
+| controller.service.internal.ports | object | `{}` | Custom port mapping for internal service |
+| controller.service.internal.targetPorts | object | `{}` | Custom target port mapping for internal service |
+| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
+| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
+| controller.service.labels | object | `{}` | |
+| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer |
+| controller.service.loadBalancerSourceRanges | list | `[]` | |
+| controller.service.nodePorts.http | string | `""` | |
+| controller.service.nodePorts.https | string | `""` | |
+| controller.service.nodePorts.tcp | object | `{}` | |
+| controller.service.nodePorts.udp | object | `{}` | |
+| controller.service.ports.http | int | `80` | |
+| controller.service.ports.https | int | `443` | |
+| controller.service.targetPorts.http | string | `"http"` | |
+| controller.service.targetPorts.https | string | `"https"` | |
+| controller.service.type | string | `"LoadBalancer"` | |
+| controller.shareProcessNamespace | bool | `false` | |
+| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
+| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap |
+| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) |
+| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # |
+| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # |
+| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. # Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # |
+| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap |
+| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) |
+| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # |
+| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false |
+| defaultBackend.affinity | object | `{}` | |
+| defaultBackend.autoscaling.annotations | object | `{}` | |
+| defaultBackend.autoscaling.enabled | bool | `false` | |
+| defaultBackend.autoscaling.maxReplicas | int | `2` | |
+| defaultBackend.autoscaling.minReplicas | int | `1` | |
+| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | |
+| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | |
+| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # |
+| defaultBackend.enabled | bool | `false` | |
+| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one |
+| defaultBackend.extraArgs | object | `{}` | |
+| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods |
+| defaultBackend.extraVolumeMounts | list | `[]` | |
+| defaultBackend.extraVolumes | list | `[]` | |
+| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | |
+| defaultBackend.image.image | string | `"defaultbackend-amd64"` | |
+| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | |
+| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | |
+| defaultBackend.image.registry | string | `"registry.k8s.io"` | |
+| defaultBackend.image.runAsNonRoot | bool | `true` | |
+| defaultBackend.image.runAsUser | int | `65534` | |
+| defaultBackend.image.tag | string | `"1.5"` | |
+| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources |
+| defaultBackend.livenessProbe.failureThreshold | int | `3` | |
+| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | |
+| defaultBackend.livenessProbe.periodSeconds | int | `10` | |
+| defaultBackend.livenessProbe.successThreshold | int | `1` | |
+| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | |
+| defaultBackend.minAvailable | int | `1` | |
+| defaultBackend.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # |
+| defaultBackend.name | string | `"defaultbackend"` | |
+| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # |
+| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # |
+| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata |
+| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # |
+| defaultBackend.port | int | `8080` | |
+| defaultBackend.priorityClassName | string | `""` | |
+| defaultBackend.readinessProbe.failureThreshold | int | `6` | |
+| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | |
+| defaultBackend.readinessProbe.periodSeconds | int | `5` | |
+| defaultBackend.readinessProbe.successThreshold | int | `1` | |
+| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | |
+| defaultBackend.replicaCount | int | `1` | |
+| defaultBackend.resources | object | `{}` | |
+| defaultBackend.service.annotations | object | `{}` | |
+| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
+| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | |
+| defaultBackend.service.servicePort | int | `80` | |
+| defaultBackend.service.type | string | `"ClusterIP"` | |
+| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | |
+| defaultBackend.serviceAccount.create | bool | `true` | |
+| defaultBackend.serviceAccount.name | string | `""` | |
+| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # |
+| defaultBackend.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # |
+| dhParam | string | `""` | A base64-encoded Diffie-Hellman parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param |
+| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ |
+| podSecurityPolicy.enabled | bool | `false` | |
+| portNamePrefix | string | `""` | Prefix for TCP and UDP ports names in ingress controller service # Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration |
+| rbac.create | bool | `true` | |
+| rbac.scope | bool | `false` | |
+| revisionHistoryLimit | int | `10` | Rollback limit # |
+| serviceAccount.annotations | object | `{}` | Annotations for the controller service account |
+| serviceAccount.automountServiceAccountToken | bool | `true` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # |
+| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # |
diff --git a/charts/ingress-nginx/README.md.gotmpl b/charts/ingress-nginx/README.md.gotmpl
new file mode 100644
index 0000000..4a35a40
--- /dev/null
+++ b/charts/ingress-nginx/README.md.gotmpl
@@ -0,0 +1,225 @@
+{{ template "chart.header" . }}
+[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
+
+{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
+
+To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
+
+This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+{{ template "chart.requirementsSection" . }}
+
+## Get Repo Info
+
+```console
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+```
+
+## Install Chart
+
+**Important:** only helm3 is supported
+
+```console
+helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
+```
+
+The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] [CHART] --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Migrating from stable/nginx-ingress
+
+There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart:
+
+1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
+1. For critical services in production that require zero-downtime, you will want to:
+ 1. [Install](#install-chart) a second Ingress controller
+ 1. Redirect your DNS traffic from the old controller to the new controller
+ 1. Log traffic from both controllers during this changeover
+ 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
+
+Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values ingress-nginx/ingress-nginx
+```
+
+### PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one,
+else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
+
+### Prometheus Metrics
+
+The Ingress-Nginx Controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
+Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`)
+
+### ingress-nginx nginx\_status page/stats server
+
+Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in Ingress-Nginx Controller:
+
+- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
+- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
+ You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server
+
+### ExternalDNS Service Configuration
+
+Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service:
+
+```yaml
+controller:
+ service:
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
+```
+
+### AWS L7 ELB with SSL Termination
+
+Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
+
+```yaml
+controller:
+ service:
+ targetPorts:
+ http: http
+ https: http
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
+ service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+ service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+ service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+```
+
+### Additional Internal Load Balancer
+
+This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
+
+By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
+
+You'll need to set both the following values:
+
+`controller.service.internal.enabled`
+`controller.service.internal.annotations`
+
+If one of them is missing, the internal load balancer will not be deployed. For example, you may have `controller.service.internal.enabled=true` but no annotations set; in this case no action will be taken.
+
+`controller.service.internal.annotations` varies with the cloud service you're using.
+
+Example for AWS:
+
+```yaml
+controller:
+ service:
+ internal:
+ enabled: true
+ annotations:
+ # Create internal ELB
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+Example for GCE:
+
+```yaml
+controller:
+ service:
+ internal:
+ enabled: true
+ annotations:
+ # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
+ # For GKE versions 1.17 and later
+ networking.gke.io/load-balancer-type: "Internal"
+ # For earlier versions
+ # cloud.google.com/load-balancer-type: "Internal"
+
+ # Any other annotation can be declared here.
+```
+
+Example for Azure:
+
+```yaml
+controller:
+ service:
+ annotations:
+ # Create internal LB
+ service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+Example for Oracle Cloud Infrastructure:
+
+```yaml
+controller:
+ service:
+ annotations:
+ # Create internal LB
+ service.beta.kubernetes.io/oci-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object.
+
+Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
+
+### Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the Ingress-Nginx Controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
+**This feature is enabled by default since 0.31.0.**
+
+nginx-ingress-controller versions 0.25.* work only with Kubernetes 1.14+; version 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
+
+#### How the Chart Configures the Hooks
+A validating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
+
+1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
+2. The Ingress-Nginx Controller pod is configured to use a TLS proxy container, which will load that certificate.
+3. Validating and Mutating webhook configurations are created in the cluster.
+4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations
+
+#### Alternatives
+It should be possible to use [cert-manager/cert-manager](https://github.com/cert-manager/cert-manager) if a more complete solution is required.
+
+You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `controller.admissionWebhooks.certManager.enabled` value to true.
+
+Please ensure that cert-manager is correctly installed and configured.
+
+### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
+
+```console
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
+
+{{ template "chart.valuesSection" . }}
diff --git a/charts/ingress-nginx/changelog.md.gotmpl b/charts/ingress-nginx/changelog.md.gotmpl
new file mode 100644
index 0000000..de98856
--- /dev/null
+++ b/charts/ingress-nginx/changelog.md.gotmpl
@@ -0,0 +1,9 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### {{ .NewHelmChartVersion }}
+{{ with .HelmUpdates }}
+{{ range . }}* {{ . }}
+{{ end }}{{ end }}
+**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-{{ .PreviousHelmChartVersion }}...helm-chart-{{ .NewHelmChartVersion }}
diff --git a/charts/ingress-nginx/changelog/.gitkeep b/charts/ingress-nginx/changelog/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charts/ingress-nginx/changelog/.gitkeep
diff --git a/charts/ingress-nginx/changelog/Changelog-4.5.2.md b/charts/ingress-nginx/changelog/Changelog-4.5.2.md
new file mode 100644
index 0000000..b6d8a3b
--- /dev/null
+++ b/charts/ingress-nginx/changelog/Changelog-4.5.2.md
@@ -0,0 +1,13 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.5.2
+
+* add lint on chart before release (#9570)
+* ci: remove setup-helm step (#9404)
+* feat(helm): Optionally use cert-manager instead admission patch (#9279)
+* run helm release on main only and when the chart/value changes only (#9290)
+* Update Ingress-Nginx version controller-v1.6.4
+
+**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.4.3...helm-chart-4.5.2
diff --git a/charts/ingress-nginx/changelog/Changelog-4.6.0.md b/charts/ingress-nginx/changelog/Changelog-4.6.0.md
new file mode 100644
index 0000000..469aaba
--- /dev/null
+++ b/charts/ingress-nginx/changelog/Changelog-4.6.0.md
@@ -0,0 +1,24 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.6.0
+
+* docs(helm): fix value key in readme for enabling certManager (#9640)
+* Upgrade alpine 3.17.2
+* Upgrade golang 1.20
+* Drop testing/support for Kubernetes 1.23
+* docs(helm): fix value key in readme for enabling certManager (#9640)
+* Update Ingress-Nginx version controller-v1.7.0
+* feat: OpenTelemetry module integration (#9062)
+* canary-weight-total annotation ignored in rule backends (#9729)
+* fix controller psp's volume config (#9740)
+* Fix several Helm YAML issues with extraModules and extraInitContainers (#9709)
+* Chart: Drop `controller.headers`, rework DH param secret. (#9659)
+* Deployment/DaemonSet: Label pods using `ingress-nginx.labels`. (#9732)
+* HPA: autoscaling/v2beta1 deprecated, bump apiVersion to v2 for defaultBackend (#9731)
+* Fix incorrect annotation name in upstream hashing configuration (#9617)
+
+* Update Ingress-Nginx version controller-v1.7.0
+
+**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.5.2...helm-chart-4.6.0
diff --git a/charts/ingress-nginx/changelog/Changelog-4.6.1.md b/charts/ingress-nginx/changelog/Changelog-4.6.1.md
new file mode 100644
index 0000000..57d99b8
--- /dev/null
+++ b/charts/ingress-nginx/changelog/Changelog-4.6.1.md
@@ -0,0 +1,11 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.6.1
+
+* [helm] Support custom port configuration for internal service (#9846)
+* Adding resource type to default HPA configuration to resolve issues with Terraform helm chart usage (#9803)
+* Update Ingress-Nginx version controller-v1.7.1
+
+**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.6.0...helm-chart-4.6.1
diff --git a/charts/ingress-nginx/changelog/Changelog-4.7.0.md b/charts/ingress-nginx/changelog/Changelog-4.7.0.md
new file mode 100644
index 0000000..7399da7
--- /dev/null
+++ b/charts/ingress-nginx/changelog/Changelog-4.7.0.md
@@ -0,0 +1,14 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.7.0
+
+* helm: Fix opentelemetry module installation for daemonset (#9792)
+* Update charts/* to keep project name display aligned (#9931)
+* HPA: Use capabilites & align manifests. (#9521)
+* PodDisruptionBudget spec logic update (#9904)
+* add option for annotations in PodDisruptionBudget (#9843)
+* Update Ingress-Nginx version controller-v1.8.0
+
+**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.6.1...helm-chart-4.7.0
diff --git a/charts/ingress-nginx/ci/controller-admission-tls-cert-manager-values.yaml b/charts/ingress-nginx/ci/controller-admission-tls-cert-manager-values.yaml
new file mode 100644
index 0000000..a13241c
--- /dev/null
+++ b/charts/ingress-nginx/ci/controller-admission-tls-cert-manager-values.yaml
@@ -0,0 +1,6 @@
+controller:
+ admissionWebhooks:
+ certManager:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/charts/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml
new file mode 100644
index 0000000..b28a232
--- /dev/null
+++ b/charts/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml
@@ -0,0 +1,7 @@
+controller:
+ watchIngressWithoutClass: true
+ ingressClassResource:
+ name: custom-nginx
+ enabled: true
+ default: true
+ controllerValue: "k8s.io/custom-nginx"
diff --git a/charts/ingress-nginx/ci/daemonset-customconfig-values.yaml b/charts/ingress-nginx/ci/daemonset-customconfig-values.yaml
new file mode 100644
index 0000000..4393a5b
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-customconfig-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ kind: DaemonSet
+ allowSnippetAnnotations: false
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+ config:
+ use-proxy-protocol: "true"
diff --git a/charts/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/charts/ingress-nginx/ci/daemonset-customnodeport-values.yaml
new file mode 100644
index 0000000..1d94be2
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-customnodeport-values.yaml
@@ -0,0 +1,22 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+
+ service:
+ type: NodePort
+ nodePorts:
+ tcp:
+ 9000: 30090
+ udp:
+ 9001: 30091
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/daemonset-extra-modules.yaml b/charts/ingress-nginx/ci/daemonset-extra-modules.yaml
new file mode 100644
index 0000000..f299dbf
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-extra-modules.yaml
@@ -0,0 +1,10 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ service:
+ type: ClusterIP
+ extraModules:
+ - name: opentelemetry
+ image: busybox
diff --git a/charts/ingress-nginx/ci/daemonset-headers-values.yaml b/charts/ingress-nginx/ci/daemonset-headers-values.yaml
new file mode 100644
index 0000000..ab7d47b
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-headers-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ addHeaders:
+ X-Frame-Options: deny
+ proxySetHeaders:
+ X-Forwarded-Proto: https
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/charts/ingress-nginx/ci/daemonset-internal-lb-values.yaml
new file mode 100644
index 0000000..0a200a7
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-internal-lb-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ internal:
+ enabled: true
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
diff --git a/charts/ingress-nginx/ci/daemonset-nodeport-values.yaml b/charts/ingress-nginx/ci/daemonset-nodeport-values.yaml
new file mode 100644
index 0000000..3b7aa2f
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-nodeport-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: NodePort
diff --git a/charts/ingress-nginx/ci/daemonset-podannotations-values.yaml b/charts/ingress-nginx/ci/daemonset-podannotations-values.yaml
new file mode 100644
index 0000000..0b55306
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-podannotations-values.yaml
@@ -0,0 +1,17 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
+ podAnnotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "10254"
+ prometheus.io/scheme: http
+ prometheus.io/scrape: "true"
diff --git a/charts/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/charts/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml
new file mode 100644
index 0000000..acd86a7
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml
@@ -0,0 +1,20 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ tcp:
+ configMapNamespace: default
+ udp:
+ configMapNamespace: default
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/charts/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml
new file mode 100644
index 0000000..90b0f57
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml
@@ -0,0 +1,18 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
+
+portNamePrefix: "port"
diff --git a/charts/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/charts/ingress-nginx/ci/daemonset-tcp-udp-values.yaml
new file mode 100644
index 0000000..25ee64d
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-tcp-udp-values.yaml
@@ -0,0 +1,16 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/daemonset-tcp-values.yaml b/charts/ingress-nginx/ci/daemonset-tcp-values.yaml
new file mode 100644
index 0000000..380c8b4
--- /dev/null
+++ b/charts/ingress-nginx/ci/daemonset-tcp-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/deamonset-default-values.yaml b/charts/ingress-nginx/ci/deamonset-default-values.yaml
new file mode 100644
index 0000000..82fa23e
--- /dev/null
+++ b/charts/ingress-nginx/ci/deamonset-default-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deamonset-metrics-values.yaml b/charts/ingress-nginx/ci/deamonset-metrics-values.yaml
new file mode 100644
index 0000000..cb3cb54
--- /dev/null
+++ b/charts/ingress-nginx/ci/deamonset-metrics-values.yaml
@@ -0,0 +1,12 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deamonset-psp-values.yaml b/charts/ingress-nginx/ci/deamonset-psp-values.yaml
new file mode 100644
index 0000000..8026a63
--- /dev/null
+++ b/charts/ingress-nginx/ci/deamonset-psp-values.yaml
@@ -0,0 +1,13 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/charts/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/charts/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml
new file mode 100644
index 0000000..fccdb13
--- /dev/null
+++ b/charts/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml
@@ -0,0 +1,13 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/charts/ingress-nginx/ci/deamonset-webhook-values.yaml b/charts/ingress-nginx/ci/deamonset-webhook-values.yaml
new file mode 100644
index 0000000..54d364d
--- /dev/null
+++ b/charts/ingress-nginx/ci/deamonset-webhook-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml b/charts/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml
new file mode 100644
index 0000000..dca3f35
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ autoscaling:
+ enabled: true
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 300
+ policies:
+ - type: Pods
+ value: 1
+ periodSeconds: 180
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deployment-autoscaling-values.yaml b/charts/ingress-nginx/ci/deployment-autoscaling-values.yaml
new file mode 100644
index 0000000..b8b3ac6
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-autoscaling-values.yaml
@@ -0,0 +1,11 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ autoscaling:
+ enabled: true
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deployment-customconfig-values.yaml b/charts/ingress-nginx/ci/deployment-customconfig-values.yaml
new file mode 100644
index 0000000..1749418
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-customconfig-values.yaml
@@ -0,0 +1,12 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ config:
+ use-proxy-protocol: "true"
+ allowSnippetAnnotations: false
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deployment-customnodeport-values.yaml b/charts/ingress-nginx/ci/deployment-customnodeport-values.yaml
new file mode 100644
index 0000000..a564eaf
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-customnodeport-values.yaml
@@ -0,0 +1,20 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: NodePort
+ nodePorts:
+ tcp:
+ 9000: 30090
+ udp:
+ 9001: 30091
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/deployment-default-values.yaml b/charts/ingress-nginx/ci/deployment-default-values.yaml
new file mode 100644
index 0000000..9f46b4e
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-default-values.yaml
@@ -0,0 +1,8 @@
+# Minimal values for CI: only the dev image and service type are set; all other chart defaults apply
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deployment-extra-modules-default-container-sec-context.yaml b/charts/ingress-nginx/ci/deployment-extra-modules-default-container-sec-context.yaml
new file mode 100644
index 0000000..2310c34
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-extra-modules-default-container-sec-context.yaml
@@ -0,0 +1,12 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ extraModules:
+ - name: opentelemetry
+ image: busybox
diff --git a/charts/ingress-nginx/ci/deployment-extra-modules-specific-container-sec-context.yaml b/charts/ingress-nginx/ci/deployment-extra-modules-specific-container-sec-context.yaml
new file mode 100644
index 0000000..bd2f011
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-extra-modules-specific-container-sec-context.yaml
@@ -0,0 +1,12 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
+ extraModules:
+ - name: opentelemetry
+ image: busybox
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
diff --git a/charts/ingress-nginx/ci/deployment-extra-modules.yaml b/charts/ingress-nginx/ci/deployment-extra-modules.yaml
new file mode 100644
index 0000000..ec59235
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-extra-modules.yaml
@@ -0,0 +1,10 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
+ extraModules:
+ - name: opentelemetry
+ image: busybox
diff --git a/charts/ingress-nginx/ci/deployment-headers-values.yaml b/charts/ingress-nginx/ci/deployment-headers-values.yaml
new file mode 100644
index 0000000..17a11ac
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-headers-values.yaml
@@ -0,0 +1,13 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ addHeaders:
+ X-Frame-Options: deny
+ proxySetHeaders:
+ X-Forwarded-Proto: https
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deployment-internal-lb-values.yaml b/charts/ingress-nginx/ci/deployment-internal-lb-values.yaml
new file mode 100644
index 0000000..663ccb9
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-internal-lb-values.yaml
@@ -0,0 +1,19 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ internal:
+ enabled: true
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ ports:
+ http: 443
+ https: 80
+ targetPorts:
+ http: 443
+ https: 80
diff --git a/charts/ingress-nginx/ci/deployment-metrics-values.yaml b/charts/ingress-nginx/ci/deployment-metrics-values.yaml
new file mode 100644
index 0000000..9209ad5
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-metrics-values.yaml
@@ -0,0 +1,11 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/ci/deployment-nodeport-values.yaml b/charts/ingress-nginx/ci/deployment-nodeport-values.yaml
new file mode 100644
index 0000000..cd9b323
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-nodeport-values.yaml
@@ -0,0 +1,9 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: NodePort
diff --git a/charts/ingress-nginx/ci/deployment-podannotations-values.yaml b/charts/ingress-nginx/ci/deployment-podannotations-values.yaml
new file mode 100644
index 0000000..b48d93c
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-podannotations-values.yaml
@@ -0,0 +1,16 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
+ podAnnotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "10254"
+ prometheus.io/scheme: http
+ prometheus.io/scrape: "true"
diff --git a/charts/ingress-nginx/ci/deployment-psp-values.yaml b/charts/ingress-nginx/ci/deployment-psp-values.yaml
new file mode 100644
index 0000000..2f332a7
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-psp-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/charts/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/charts/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml
new file mode 100644
index 0000000..c51a4e9
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml
@@ -0,0 +1,19 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ tcp:
+ configMapNamespace: default
+ udp:
+ configMapNamespace: default
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/charts/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml
new file mode 100644
index 0000000..56323c5
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml
@@ -0,0 +1,17 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
+
+portNamePrefix: "port"
diff --git a/charts/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/charts/ingress-nginx/ci/deployment-tcp-udp-values.yaml
new file mode 100644
index 0000000..5b45b69
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-tcp-udp-values.yaml
@@ -0,0 +1,15 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/deployment-tcp-values.yaml b/charts/ingress-nginx/ci/deployment-tcp-values.yaml
new file mode 100644
index 0000000..ac0b6e6
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-tcp-values.yaml
@@ -0,0 +1,11 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+ 9001: "default/test:8080"
diff --git a/charts/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/charts/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml
new file mode 100644
index 0000000..6195bb3
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml
@@ -0,0 +1,12 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/charts/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml b/charts/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml
new file mode 100644
index 0000000..95487b0
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml
@@ -0,0 +1,12 @@
+controller:
+ service:
+ type: ClusterIP
+ admissionWebhooks:
+ enabled: true
+ extraEnvs:
+ - name: FOO
+ value: foo
+ - name: TEST
+ value: test
+ patch:
+ enabled: true
diff --git a/charts/ingress-nginx/ci/deployment-webhook-resources-values.yaml b/charts/ingress-nginx/ci/deployment-webhook-resources-values.yaml
new file mode 100644
index 0000000..49ebbb0
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-webhook-resources-values.yaml
@@ -0,0 +1,23 @@
+controller:
+ service:
+ type: ClusterIP
+ admissionWebhooks:
+ enabled: true
+ createSecretJob:
+ resources:
+ limits:
+ cpu: 10m
+ memory: 20Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ patchWebhookJob:
+ resources:
+ limits:
+ cpu: 10m
+ memory: 20Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ patch:
+ enabled: true
diff --git a/charts/ingress-nginx/ci/deployment-webhook-values.yaml b/charts/ingress-nginx/ci/deployment-webhook-values.yaml
new file mode 100644
index 0000000..76669a5
--- /dev/null
+++ b/charts/ingress-nginx/ci/deployment-webhook-values.yaml
@@ -0,0 +1,9 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/charts/ingress-nginx/templates/NOTES.txt b/charts/ingress-nginx/templates/NOTES.txt
new file mode 100644
index 0000000..9fe35c7
--- /dev/null
+++ b/charts/ingress-nginx/templates/NOTES.txt
@@ -0,0 +1,73 @@
+The ingress-nginx controller has been installed.
+
+{{- if contains "NodePort" .Values.controller.service.type }}
+Get the application URL by running these commands:
+
+{{- if (not (empty .Values.controller.service.nodePorts.http)) }}
+ export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }}
+{{- else }}
+ export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }})
+{{- end }}
+{{- if (not (empty .Values.controller.service.nodePorts.https)) }}
+ export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }}
+{{- else }}
+ export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }})
+{{- end }}
+ export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
+
+ echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
+{{- else if contains "LoadBalancer" .Values.controller.service.type }}
+It may take a few minutes for the LoadBalancer IP to be available.
+You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}'
+{{- else if contains "ClusterIP" .Values.controller.service.type }}
+Get the application URL by running these commands:
+ export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}")
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
+ echo "Visit http://127.0.0.1:8080 to access your application."
+{{- end }}
+
+An example Ingress that makes use of the controller:
+
+{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}}
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ name: example
+ namespace: foo
+ {{- if eq $isV1 false }}
+ annotations:
+ kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }}
+ {{- end }}
+ spec:
+ {{- if $isV1 }}
+ ingressClassName: {{ .Values.controller.ingressClassResource.name }}
+ {{- end }}
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - pathType: Prefix
+ backend:
+ service:
+ name: exampleService
+ port:
+ number: 80
+ path: /
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
diff --git a/charts/ingress-nginx/templates/_helpers.tpl b/charts/ingress-nginx/templates/_helpers.tpl
new file mode 100644
index 0000000..7db5b2c
--- /dev/null
+++ b/charts/ingress-nginx/templates/_helpers.tpl
@@ -0,0 +1,212 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "ingress-nginx.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "ingress-nginx.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "ingress-nginx.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Container SecurityContext.
+*/}}
+{{- define "controller.containerSecurityContext" -}}
+{{- if .Values.controller.containerSecurityContext -}}
+{{- toYaml .Values.controller.containerSecurityContext -}}
+{{- else -}}
+capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ {{- if .Values.controller.image.chroot }}
+ - SYS_CHROOT
+ {{- end }}
+runAsUser: {{ .Values.controller.image.runAsUser }}
+allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Get specific image
+*/}}
+{{- define "ingress-nginx.image" -}}
+{{- if .chroot -}}
+{{- printf "%s-chroot" .image -}}
+{{- else -}}
+{{- printf "%s" .image -}}
+{{- end }}
+{{- end -}}
+
+{{/*
+Get specific image digest
+*/}}
+{{- define "ingress-nginx.imageDigest" -}}
+{{- if .chroot -}}
+{{- if .digestChroot -}}
+{{- printf "@%s" .digestChroot -}}
+{{- end }}
+{{- else -}}
+{{ if .digest -}}
+{{- printf "@%s" .digest -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified controller name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "ingress-nginx.controller.fullname" -}}
+{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Construct a unique electionID.
+Users can provide an override for an explicit electionID if they want via `.Values.controller.electionID`
+*/}}
+{{- define "ingress-nginx.controller.electionID" -}}
+{{- $defElectionID := printf "%s-leader" (include "ingress-nginx.fullname" .) -}}
+{{- $electionID := default $defElectionID .Values.controller.electionID -}}
+{{- print $electionID -}}
+{{- end -}}
+
+{{/*
+Construct the path for the publish-service.
+
+By convention this will simply use the <namespace>/<controller-name> to match the name of the
+service generated.
+
+Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride`
+
+*/}}
+{{- define "ingress-nginx.controller.publishServicePath" -}}
+{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}}
+{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }}
+{{- print $servicePath | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified default backend name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "ingress-nginx.defaultBackend.fullname" -}}
+{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "ingress-nginx.labels" -}}
+helm.sh/chart: {{ include "ingress-nginx.chart" . }}
+{{ include "ingress-nginx.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- if .Values.commonLabels}}
+{{ toYaml .Values.commonLabels }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "ingress-nginx.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "ingress-nginx.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the controller service account to use
+*/}}
+{{- define "ingress-nginx.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "ingress-nginx.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled
+*/}}
+{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}}
+{{- if .Values.defaultBackend.serviceAccount.create -}}
+ {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }}
+{{- else -}}
+ {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiGroup for PodSecurityPolicy.
+*/}}
+{{- define "podSecurityPolicy.apiGroup" -}}
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "policy" -}}
+{{- else -}}
+{{- print "extensions" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Fail the render if the ingress controller image tag is older than 0.27.0
+*/}}
+{{- define "isControllerTagValid" -}}
+{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}}
+{{- fail "Controller container image tag should be 0.27.0 or higher" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+IngressClass parameters.
+*/}}
+{{- define "ingressClass.parameters" -}}
+ {{- if .Values.controller.ingressClassResource.parameters -}}
+ parameters:
+{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}}
+ {{ end }}
+{{- end -}}
+
+{{/*
+Extra modules.
+*/}}
+{{- define "extraModules" -}}
+
+- name: {{ .name }}
+ image: {{ .image }}
+ command: ['sh', '-c', '/usr/local/bin/init_module.sh']
+ {{- if (.containerSecurityContext) }}
+ securityContext: {{ .containerSecurityContext | toYaml | nindent 4 }}
+ {{- end }}
+ volumeMounts:
+ - name: {{ toYaml "modules"}}
+ mountPath: {{ toYaml "/modules_mount"}}
+
+{{- end -}}
diff --git a/charts/ingress-nginx/templates/_params.tpl b/charts/ingress-nginx/templates/_params.tpl
new file mode 100644
index 0000000..a1aef01
--- /dev/null
+++ b/charts/ingress-nginx/templates/_params.tpl
@@ -0,0 +1,65 @@
+{{- define "ingress-nginx.params" -}}
+- /nginx-ingress-controller
+{{- if .Values.defaultBackend.enabled }}
+- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }}
+{{- end }}
+{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }}
+{{- if .Values.controller.service.external.enabled }}
+- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}
+{{- else if .Values.controller.service.internal.enabled }}
+- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}-internal
+{{- end }}
+{{- end }}
+- --election-id={{ include "ingress-nginx.controller.electionID" . }}
+- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }}
+{{- if .Values.controller.ingressClass }}
+- --ingress-class={{ .Values.controller.ingressClass }}
+{{- end }}
+- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }}
+{{- if .Values.tcp }}
+- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp
+{{- end }}
+{{- if .Values.udp }}
+- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-udp
+{{- end }}
+{{- if .Values.controller.scope.enabled }}
+- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }}
+{{- end }}
+{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }}
+- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }}
+{{- end }}
+{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }}
+- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.enabled }}
+- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }}
+- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }}
+- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }}
+{{- end }}
+{{- if .Values.controller.maxmindLicenseKey }}
+- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }}
+{{- end }}
+{{- if .Values.controller.healthCheckHost }}
+- --healthz-host={{ .Values.controller.healthCheckHost }}
+{{- end }}
+{{- if not (eq .Values.controller.healthCheckPath "/healthz") }}
+- --health-check-path={{ .Values.controller.healthCheckPath }}
+{{- end }}
+{{- if .Values.controller.ingressClassByName }}
+- --ingress-class-by-name=true
+{{- end }}
+{{- if .Values.controller.watchIngressWithoutClass }}
+- --watch-ingress-without-class=true
+{{- end }}
+{{- if .Values.controller.enableTopologyAwareRouting }}
+- --enable-topology-aware-routing=true
+{{- end }}
+{{- range $key, $value := .Values.controller.extraArgs }}
+{{- /* Accept keys without values or with false as value */}}
+{{- if eq ($value | quote | len) 2 }}
+- --{{ $key }}
+{{- else }}
+- --{{ $key }}={{ $value }}
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/cert-manager.yaml b/charts/ingress-nginx/templates/admission-webhooks/cert-manager.yaml
new file mode 100644
index 0000000..55fab47
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/cert-manager.yaml
@@ -0,0 +1,63 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.certManager.enabled -}}
+{{- if not .Values.controller.admissionWebhooks.certManager.issuerRef -}}
+# Create a selfsigned Issuer, in order to create a root CA certificate for
+# signing webhook serving certificates
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-self-signed-issuer
+ namespace: {{ .Release.Namespace }}
+spec:
+ selfSigned: {}
+---
+# Generate a CA Certificate used to sign certificates for the webhook
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-root-cert
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ include "ingress-nginx.fullname" . }}-root-cert
+ duration: {{ .Values.controller.admissionWebhooks.certManager.rootCert.duration | default "43800h0m0s" | quote }}
+ issuerRef:
+ name: {{ include "ingress-nginx.fullname" . }}-self-signed-issuer
+ commonName: "ca.webhook.ingress-nginx"
+ isCA: true
+ subject:
+ organizations:
+ - ingress-nginx
+---
+# Create an Issuer that uses the above generated CA certificate to issue certs
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-root-issuer
+ namespace: {{ .Release.Namespace }}
+spec:
+ ca:
+ secretName: {{ include "ingress-nginx.fullname" . }}-root-cert
+{{- end }}
+---
+# generate a server certificate for the apiservices to use
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-admission
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ include "ingress-nginx.fullname" . }}-admission
+ duration: {{ .Values.controller.admissionWebhooks.certManager.admissionCert.duration | default "8760h0m0s" | quote }}
+ issuerRef:
+ {{- if .Values.controller.admissionWebhooks.certManager.issuerRef }}
+ {{- toYaml .Values.controller.admissionWebhooks.certManager.issuerRef | nindent 4 }}
+ {{- else }}
+ name: {{ include "ingress-nginx.fullname" . }}-root-issuer
+ {{- end }}
+ dnsNames:
+ - {{ include "ingress-nginx.controller.fullname" . }}-admission
+ - {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ .Release.Namespace }}
+ - {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ .Release.Namespace }}.svc
+ subject:
+ organizations:
+ - ingress-nginx-admission
+{{- end -}}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
new file mode 100644
index 0000000..f9ec709
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
@@ -0,0 +1,34 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-admission
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: admission-webhook
+ {{- with .Values.controller.admissionWebhooks.patch.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+rules:
+ - apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - update
+{{- if .Values.podSecurityPolicy.enabled }}
+ - apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ {{- with .Values.controller.admissionWebhooks.existingPsp }}
+ - {{ . }}
+ {{- else }}
+ - {{ include "ingress-nginx.fullname" . }}-admission
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
new file mode 100644
index 0000000..8719532
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-admission
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: admission-webhook
+ {{- with .Values.controller.admissionWebhooks.patch.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "ingress-nginx.fullname" . }}-admission
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "ingress-nginx.fullname" . }}-admission
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
new file mode 100644
index 0000000..d93433e
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
@@ -0,0 +1,80 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
+apiVersion: batch/v1
+kind: Job  # pre-install/pre-upgrade hook job: generates the webhook TLS certificate Secret
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission-create
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+    {{- with .Values.controller.admissionWebhooks.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.patch.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
+  # Alpha feature since k8s 1.12
+  ttlSecondsAfterFinished: 0
+{{- end }}
+  template:
+    metadata:
+      name: {{ include "ingress-nginx.fullname" . }}-admission-create
+      {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }}
+      annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "ingress-nginx.labels" . | nindent 8 }}
+        app.kubernetes.io/component: admission-webhook
+        {{- with .Values.controller.admissionWebhooks.patch.labels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      {{- end }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: create
+          {{- with .Values.controller.admissionWebhooks.patch.image }}
+          image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+          {{- end }}
+          imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
+          args:
+            - create  # create a cert for the admission service host names and store it in the Secret named below
+            - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc
+            - --namespace=$(POD_NAMESPACE)
+            - --secret-name={{ include "ingress-nginx.fullname" . }}-admission
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            {{- if .Values.controller.admissionWebhooks.extraEnvs }}
+            {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }}
+            {{- end }}
+          {{- if .Values.controller.admissionWebhooks.createSecretJob.securityContext }}
+          securityContext: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.securityContext | nindent 12 }}
+          {{- end }}
+          {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }}
+          resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }}
+          {{- end }}
+      restartPolicy: OnFailure
+      serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission
+      {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }}
+      nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.admissionWebhooks.patch.tolerations }}
+      tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.admissionWebhooks.patch.securityContext }}
+      securityContext:
+        {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }}
+      {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
new file mode 100644
index 0000000..0fa3ff9
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
@@ -0,0 +1,82 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
+apiVersion: batch/v1
+kind: Job  # post-install/post-upgrade hook job: patches the generated CA into the webhook configuration
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission-patch
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+    {{- with .Values.controller.admissionWebhooks.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.patch.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
+  # Alpha feature since k8s 1.12
+  ttlSecondsAfterFinished: 0
+{{- end }}
+  template:
+    metadata:
+      name: {{ include "ingress-nginx.fullname" . }}-admission-patch
+      {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }}
+      annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "ingress-nginx.labels" . | nindent 8 }}
+        app.kubernetes.io/component: admission-webhook
+        {{- with .Values.controller.admissionWebhooks.patch.labels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      {{- end }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: patch
+          {{- with .Values.controller.admissionWebhooks.patch.image }}
+          image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+          {{- end }}
+          imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
+          args:
+            - patch
+            - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission
+            - --namespace=$(POD_NAMESPACE)
+            - --patch-mutating=false  # do not patch any mutating webhook; only the validating one below
+            - --secret-name={{ include "ingress-nginx.fullname" . }}-admission
+            - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }}
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            {{- if .Values.controller.admissionWebhooks.extraEnvs }}
+            {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }}
+            {{- end }}
+          {{- if .Values.controller.admissionWebhooks.patchWebhookJob.securityContext }}
+          securityContext: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.securityContext | nindent 12 }}
+          {{- end }}
+          {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }}
+          resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }}
+          {{- end }}
+      restartPolicy: OnFailure
+      serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission
+      {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }}
+      nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.admissionWebhooks.patch.tolerations }}
+      tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.admissionWebhooks.patch.securityContext }}
+      securityContext:
+        {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }}
+      {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/networkpolicy.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/networkpolicy.yaml
new file mode 100644
index 0000000..08b3225
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/networkpolicy.yaml
@@ -0,0 +1,26 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.networkPolicyEnabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy  # traffic policy for the admission-webhook hook pods
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.patch.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  podSelector:
+    matchLabels:
+      # use the stable selector-label subset (as the controller Deployment/DaemonSet selectors do),
+      # not the full label set, which embeds the chart version and changes on every release
+      {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: admission-webhook
+  policyTypes:
+    - Ingress
+    - Egress
+  egress:
+    - {}  # empty rule: allow all egress from the selected pods
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml
new file mode 100644
index 0000000..e19c955
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml
@@ -0,0 +1,41 @@
+{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }}
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy  # PSP API was removed in k8s 1.25, hence the semver gate above
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.patch.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  allowPrivilegeEscalation: false
+  fsGroup:
+    ranges:
+      - max: 65535
+        min: 1
+    rule: MustRunAs
+  requiredDropCapabilities:
+    - ALL
+  runAsUser:
+    rule: MustRunAsNonRoot  # hook-job pods must not run as root
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    ranges:
+      - max: 65535
+        min: 1
+    rule: MustRunAs
+  volumes:
+    - configMap
+    - emptyDir
+    - projected
+    - secret
+    - downwardAPI
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
new file mode 100644
index 0000000..ea7c208
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
@@ -0,0 +1,24 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.patch.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets  # the hook jobs read and create the webhook cert Secret in this namespace
+    verbs:
+      - get
+      - create
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
new file mode 100644
index 0000000..60c3f4f
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
@@ -0,0 +1,24 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding  # grants the namespaced secrets Role to the admission hook jobs' ServiceAccount
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.patch.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "ingress-nginx.fullname" . }}-admission
+    namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
new file mode 100644
index 0000000..00be54e
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
+apiVersion: v1
+kind: ServiceAccount  # identity used by the admission cert create/patch hook jobs
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.patch.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/charts/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
new file mode 100644
index 0000000..f27244d
--- /dev/null
+++ b/charts/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
@@ -0,0 +1,53 @@
+{{- if .Values.controller.admissionWebhooks.enabled -}}
+# before changing this value, check the required kubernetes version
+# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  annotations:
+    {{- if .Values.controller.admissionWebhooks.certManager.enabled }}
+    certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "ingress-nginx.fullname" .) | quote }}
+    cert-manager.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "ingress-nginx.fullname" .) | quote }}
+    {{- end }}
+    {{- if .Values.controller.admissionWebhooks.annotations }}
+    {{- toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }}
+    {{- end }}
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+    {{- with .Values.controller.admissionWebhooks.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+webhooks:
+  - name: validate.nginx.ingress.kubernetes.io
+    matchPolicy: Equivalent  # also match requests submitted via equivalent API versions
+    rules:
+      - apiGroups:
+          - networking.k8s.io
+        apiVersions:
+          - v1
+        operations:
+          - CREATE
+          - UPDATE
+        resources:
+          - ingresses
+    failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }}
+    sideEffects: None
+    admissionReviewVersions:
+      - v1
+    clientConfig:
+      service:
+        namespace: {{ .Release.Namespace | quote }}
+        name: {{ include "ingress-nginx.controller.fullname" . }}-admission
+        path: /networking/v1/ingresses  # validation endpoint behind the controller's admission Service
+    {{- if .Values.controller.admissionWebhooks.timeoutSeconds }}
+    timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }}
+    {{- end }}
+    {{- if .Values.controller.admissionWebhooks.namespaceSelector }}
+    namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }}
+    {{- end }}
+    {{- if .Values.controller.admissionWebhooks.objectSelector }}
+    objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }}
+    {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/clusterrole.yaml b/charts/ingress-nginx/templates/clusterrole.yaml
new file mode 100644
index 0000000..51bc500
--- /dev/null
+++ b/charts/ingress-nginx/templates/clusterrole.yaml
@@ -0,0 +1,102 @@
+{{- if .Values.rbac.create }}
+
+{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}}
+  {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }}
+{{- end }}
+
+{{- if not .Values.rbac.scope -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole  # cluster-wide permissions; only rendered when rbac.scope is disabled
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - secrets
+{{- if not .Values.controller.scope.enabled }}
+      - namespaces  # cluster-wide namespace watch only when scoping is disabled
+{{- end}}
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs:
+      - list
+      - watch
+{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }}
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+    resourceNames:
+      - "{{ .Values.controller.scope.namespace }}"
+    verbs:
+      - get
+{{- end }}
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingressclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - list
+      - watch
+      - get
+{{- end }}
+
+{{- end }}
diff --git a/charts/ingress-nginx/templates/clusterrolebinding.yaml b/charts/ingress-nginx/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000..acbbd8b
--- /dev/null
+++ b/charts/ingress-nginx/templates/clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+{{- if and .Values.rbac.create (not .Values.rbac.scope) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding  # binds the controller ServiceAccount to the chart's ClusterRole
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "ingress-nginx.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "ingress-nginx.serviceAccountName" . }}
+    namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-configmap-addheaders.yaml b/charts/ingress-nginx/templates/controller-configmap-addheaders.yaml
new file mode 100644
index 0000000..dfd49a1
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-configmap-addheaders.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.controller.addHeaders -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers  # referenced by the controller ConfigMap's add-headers key
+  namespace: {{ .Release.Namespace }}
+data: {{ toYaml .Values.controller.addHeaders | nindent 2 }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/charts/ingress-nginx/templates/controller-configmap-proxyheaders.yaml
new file mode 100644
index 0000000..38feb72
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-configmap-proxyheaders.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.controller.proxySetHeaders -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers  # referenced by the controller ConfigMap's proxy-set-headers key
+  namespace: {{ .Release.Namespace }}
+data: {{ toYaml .Values.controller.proxySetHeaders | nindent 2 }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-configmap-tcp.yaml b/charts/ingress-nginx/templates/controller-configmap-tcp.yaml
new file mode 100644
index 0000000..0f6088e
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-configmap-tcp.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.tcp -}}
+apiVersion: v1
+kind: ConfigMap  # TCP stream mappings from .Values.tcp; values are passed through tpl below
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+{{- if .Values.controller.tcp.annotations }}
+  annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }}
+{{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}-tcp
+  namespace: {{ .Release.Namespace }}
+data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-configmap-udp.yaml b/charts/ingress-nginx/templates/controller-configmap-udp.yaml
new file mode 100644
index 0000000..3772ec5
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-configmap-udp.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.udp -}}
+apiVersion: v1
+kind: ConfigMap  # UDP stream mappings from .Values.udp; values are passed through tpl below
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+{{- if .Values.controller.udp.annotations }}
+  annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }}
+{{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}-udp
+  namespace: {{ .Release.Namespace }}
+data: {{ tpl (toYaml .Values.udp) . | nindent 2 }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-configmap.yaml b/charts/ingress-nginx/templates/controller-configmap.yaml
new file mode 100644
index 0000000..9ec2b83
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-configmap.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: ConfigMap  # main controller configuration
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+{{- if .Values.controller.configAnnotations }}
+  annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }}
+{{- end }}
+  name: {{ include "ingress-nginx.controller.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+data:
+  allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}"  # quoted so the boolean value renders as a string
+{{- if .Values.controller.addHeaders }}
+  add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers  # namespace/name of the -custom-add-headers ConfigMap
+{{- end }}
+{{- if .Values.controller.proxySetHeaders }}
+  proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
+{{- end }}
+{{- if .Values.dhParam }}
+  ssl-dh-param: {{ .Release.Namespace }}/{{ include "ingress-nginx.controller.fullname" . }}
+{{- end }}
+{{- range $key, $value := .Values.controller.config }}
+  {{- $key | nindent 2 }}: {{ $value | quote }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-daemonset.yaml b/charts/ingress-nginx/templates/controller-daemonset.yaml
new file mode 100644
index 0000000..82abe75
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-daemonset.yaml
@@ -0,0 +1,239 @@
+{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}}
+{{- include "isControllerTagValid" . -}}
+apiVersion: apps/v1
+kind: DaemonSet  # controller as a DaemonSet; rendered when controller.kind is "DaemonSet" or "Both"
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ include "ingress-nginx.controller.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  {{- if .Values.controller.annotations }}
+  annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
+  {{- end }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: controller
+  revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
+  {{- if .Values.controller.updateStrategy }}
+  updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }}
+  {{- end }}
+  minReadySeconds: {{ .Values.controller.minReadySeconds }}
+  template:
+    metadata:
+      {{- if .Values.controller.podAnnotations }}
+      annotations:
+        {{- range $key, $value := .Values.controller.podAnnotations }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+      {{- end }}
+      labels:
+        {{- include "ingress-nginx.labels" . | nindent 8 }}
+        app.kubernetes.io/component: controller
+        {{- with .Values.controller.labels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        {{- if .Values.controller.podLabels }}
+        {{- toYaml .Values.controller.podLabels | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if .Values.controller.dnsConfig }}
+      dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.hostname }}
+      hostname: {{ toYaml .Values.controller.hostname | nindent 8 }}
+      {{- end }}
+      dnsPolicy: {{ .Values.controller.dnsPolicy }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.priorityClassName }}
+      priorityClassName: {{ .Values.controller.priorityClassName | quote }}
+      {{- end }}
+      {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }}
+      securityContext:
+      {{- end }}
+      {{- if .Values.controller.podSecurityContext }}
+        {{- toYaml .Values.controller.podSecurityContext | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.sysctls }}
+        sysctls:
+        {{- range $sysctl, $value := .Values.controller.sysctls }}
+          - name: {{ $sysctl | quote }}
+            value: {{ $value | quote }}
+        {{- end }}
+      {{- end }}
+      {{- if .Values.controller.shareProcessNamespace }}
+      shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }}
+      {{- end }}
+      containers:
+        - name: {{ .Values.controller.containerName }}
+          {{- with .Values.controller.image }}
+          image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}"
+          {{- end }}
+          imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
+          {{- if .Values.controller.lifecycle }}
+          lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }}
+          {{- end }}
+          args:
+            {{- include "ingress-nginx.params" . | nindent 12 }}
+          securityContext: {{ include "controller.containerSecurityContext" . | nindent 12 }}
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            {{- if .Values.controller.enableMimalloc }}
+            - name: LD_PRELOAD
+              value: /usr/local/lib/libmimalloc.so
+            {{- end }}
+            {{- if .Values.controller.extraEnvs }}
+            {{- toYaml .Values.controller.extraEnvs | nindent 12 }}
+            {{- end }}
+          {{- if .Values.controller.startupProbe }}
+          startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }}
+          {{- end }}
+          {{- if .Values.controller.livenessProbe }}
+          livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }}
+          {{- end }}
+          {{- if .Values.controller.readinessProbe }}
+          readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }}
+          {{- end }}
+          ports:
+            {{- range $key, $value := .Values.controller.containerPort }}
+            - name: {{ $key }}
+              containerPort: {{ $value }}
+              protocol: TCP
+              {{- if $.Values.controller.hostPort.enabled }}
+              hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }}  # per-port override, falling back to the container port
+              {{- end }}
+            {{- end }}
+            {{- if .Values.controller.metrics.enabled }}
+            - name: {{ .Values.controller.metrics.portName }}
+              containerPort: {{ .Values.controller.metrics.port }}
+              protocol: TCP
+            {{- end }}
+            {{- if .Values.controller.admissionWebhooks.enabled }}
+            - name: webhook
+              containerPort: {{ .Values.controller.admissionWebhooks.port }}
+              protocol: TCP
+            {{- end }}
+            {{- range $key, $value := .Values.tcp }}
+            - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp
+              containerPort: {{ $key }}
+              protocol: TCP
+              {{- if $.Values.controller.hostPort.enabled }}
+              hostPort: {{ $key }}
+              {{- end }}
+            {{- end }}
+            {{- range $key, $value := .Values.udp }}
+            - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp
+              containerPort: {{ $key }}
+              protocol: UDP
+              {{- if $.Values.controller.hostPort.enabled }}
+              hostPort: {{ $key }}
+              {{- end }}
+            {{- end }}
+          {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+          volumeMounts:
+            {{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+            - name: modules
+              {{ if .Values.controller.image.chroot }}
+              mountPath: /chroot/modules_mount
+              {{ else }}
+              mountPath: /modules_mount
+              {{ end }}
+            {{- end }}
+            {{- if .Values.controller.customTemplate.configMapName }}
+            - mountPath: /etc/nginx/template
+              name: nginx-template-volume
+              readOnly: true
+            {{- end }}
+            {{- if .Values.controller.admissionWebhooks.enabled }}
+            - name: webhook-cert
+              mountPath: /usr/local/certificates/
+              readOnly: true
+            {{- end }}
+            {{- if .Values.controller.extraVolumeMounts }}
+            {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+          {{- end }}
+          {{- if .Values.controller.resources }}
+          resources: {{ toYaml .Values.controller.resources | nindent 12 }}
+          {{- end }}
+      {{- if .Values.controller.extraContainers }}
+        {{ toYaml .Values.controller.extraContainers | nindent 8 }}
+      {{- end }}
+      {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+      initContainers:
+        {{- if .Values.controller.extraInitContainers }}
+        {{ toYaml .Values.controller.extraInitContainers | nindent 8 }}
+        {{- end }}
+        {{- if .Values.controller.extraModules }}
+        {{- range .Values.controller.extraModules }}
+          {{ $containerSecurityContext := .containerSecurityContext | default $.Values.controller.containerSecurityContext }}
+{{ include "extraModules" (dict "name" .name "image" .image "containerSecurityContext" $containerSecurityContext) | indent 8 }}
+        {{- end }}
+        {{- end }}
+        {{- if .Values.controller.opentelemetry.enabled}}
+        {{ $otelContainerSecurityContext := $.Values.controller.opentelemetry.containerSecurityContext | default $.Values.controller.containerSecurityContext }}
+        {{- include "extraModules" (dict "name" "opentelemetry" "image" .Values.controller.opentelemetry.image "containerSecurityContext" $otelContainerSecurityContext) | nindent 8}}
+        {{- end}}
+      {{- end }}
+      {{- if .Values.controller.hostNetwork }}
+      hostNetwork: {{ .Values.controller.hostNetwork }}
+      {{- end }}
+      {{- if .Values.controller.nodeSelector }}
+      nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.tolerations }}
+      tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.affinity }}
+      affinity: {{ toYaml .Values.controller.affinity | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.topologySpreadConstraints }}
+      topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }}
+      terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
+      {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+      volumes:
+        {{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled)}}
+        - name: modules
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.controller.customTemplate.configMapName }}
+        - name: nginx-template-volume
+          configMap:
+            name: {{ .Values.controller.customTemplate.configMapName }}
+            items:
+              - key: {{ .Values.controller.customTemplate.configMapKey }}
+                path: nginx.tmpl
+        {{- end }}
+        {{- if .Values.controller.admissionWebhooks.enabled }}
+        - name: webhook-cert
+          secret:
+            secretName: {{ include "ingress-nginx.fullname" . }}-admission
+            {{- if .Values.controller.admissionWebhooks.certManager.enabled }}
+            items:
+              - key: tls.crt
+                path: cert
+              - key: tls.key
+                path: key
+            {{- end }}
+        {{- end }}
+        {{- if .Values.controller.extraVolumes }}
+        {{ toYaml .Values.controller.extraVolumes | nindent 8 }}
+        {{- end }}
+      {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-deployment.yaml b/charts/ingress-nginx/templates/controller-deployment.yaml
new file mode 100644
index 0000000..323d876
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-deployment.yaml
@@ -0,0 +1,243 @@
+{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}}
+{{- include "isControllerTagValid" . -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.controller.annotations }}
+ annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+ {{- if not .Values.controller.autoscaling.enabled }}
+ replicas: {{ .Values.controller.replicaCount }}
+ {{- end }}
+ revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
+ {{- if .Values.controller.updateStrategy }}
+ strategy:
+ {{ toYaml .Values.controller.updateStrategy | nindent 4 }}
+ {{- end }}
+ minReadySeconds: {{ .Values.controller.minReadySeconds }}
+ template:
+ metadata:
+ {{- if .Values.controller.podAnnotations }}
+ annotations:
+ {{- range $key, $value := .Values.controller.podAnnotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 8 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.podLabels }}
+ {{- toYaml .Values.controller.podLabels | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.controller.dnsConfig }}
+ dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.hostname }}
+ hostname: {{ toYaml .Values.controller.hostname | nindent 8 }}
+ {{- end }}
+ dnsPolicy: {{ .Values.controller.dnsPolicy }}
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.priorityClassName }}
+ priorityClassName: {{ .Values.controller.priorityClassName | quote }}
+ {{- end }}
+ {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }}
+ securityContext:
+ {{- end }}
+ {{- if .Values.controller.podSecurityContext }}
+ {{- toYaml .Values.controller.podSecurityContext | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.sysctls }}
+ sysctls:
+ {{- range $sysctl, $value := .Values.controller.sysctls }}
+ - name: {{ $sysctl | quote }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.shareProcessNamespace }}
+ shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }}
+ {{- end }}
+ containers:
+ - name: {{ .Values.controller.containerName }}
+ {{- with .Values.controller.image }}
+ image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
+ {{- if .Values.controller.lifecycle }}
+ lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }}
+ {{- end }}
+ args:
+ {{- include "ingress-nginx.params" . | nindent 12 }}
+ securityContext: {{ include "controller.containerSecurityContext" . | nindent 12 }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.controller.enableMimalloc }}
+ - name: LD_PRELOAD
+ value: /usr/local/lib/libmimalloc.so
+ {{- end }}
+ {{- if .Values.controller.extraEnvs }}
+ {{- toYaml .Values.controller.extraEnvs | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.startupProbe }}
+ startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.livenessProbe }}
+ livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.readinessProbe }}
+ readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }}
+ {{- end }}
+ ports:
+ {{- range $key, $value := .Values.controller.containerPort }}
+ - name: {{ $key }}
+ containerPort: {{ $value }}
+ protocol: TCP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.metrics.enabled }}
+ - name: {{ .Values.controller.metrics.portName }}
+ containerPort: {{ .Values.controller.metrics.port }}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook
+ containerPort: {{ .Values.controller.admissionWebhooks.port }}
+ protocol: TCP
+ {{- end }}
+ {{- range $key, $value := .Values.tcp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp
+ containerPort: {{ $key }}
+ protocol: TCP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ $key }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.udp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp
+ containerPort: {{ $key }}
+ protocol: UDP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ $key }}
+ {{- end }}
+ {{- end }}
+ {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+ volumeMounts:
+ {{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+ - name: modules
+ {{ if .Values.controller.image.chroot }}
+ mountPath: /chroot/modules_mount
+ {{ else }}
+ mountPath: /modules_mount
+ {{ end }}
+ {{- end }}
+ {{- if .Values.controller.customTemplate.configMapName }}
+ - mountPath: /etc/nginx/template
+ name: nginx-template-volume
+ readOnly: true
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook-cert
+ mountPath: /usr/local/certificates/
+ readOnly: true
+ {{- end }}
+ {{- if .Values.controller.extraVolumeMounts }}
+ {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.resources }}
+ resources: {{ toYaml .Values.controller.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.extraContainers }}
+ {{ toYaml .Values.controller.extraContainers | nindent 8 }}
+ {{- end }}
+ {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+ initContainers:
+ {{- if .Values.controller.extraInitContainers }}
+ {{ toYaml .Values.controller.extraInitContainers | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.extraModules }}
+ {{- range .Values.controller.extraModules }}
+ {{ $containerSecurityContext := .containerSecurityContext | default $.Values.controller.containerSecurityContext }}
+{{ include "extraModules" (dict "name" .name "image" .image "containerSecurityContext" $containerSecurityContext) | indent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.opentelemetry.enabled}}
+ {{ $otelContainerSecurityContext := $.Values.controller.opentelemetry.containerSecurityContext | default $.Values.controller.containerSecurityContext }}
+ {{- include "extraModules" (dict "name" "opentelemetry" "image" .Values.controller.opentelemetry.image "containerSecurityContext" $otelContainerSecurityContext) | nindent 8}}
+ {{- end}}
+ {{- end }}
+ {{- if .Values.controller.hostNetwork }}
+ hostNetwork: {{ .Values.controller.hostNetwork }}
+ {{- end }}
+ {{- if .Values.controller.nodeSelector }}
+ nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.tolerations }}
+ tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.affinity }}
+ affinity: {{ toYaml .Values.controller.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.topologySpreadConstraints }}
+ topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }}
+ terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
+ {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
+ volumes:
+ {{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled)}}
+ - name: modules
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.controller.customTemplate.configMapName }}
+ - name: nginx-template-volume
+ configMap:
+ name: {{ .Values.controller.customTemplate.configMapName }}
+ items:
+ - key: {{ .Values.controller.customTemplate.configMapKey }}
+ path: nginx.tmpl
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook-cert
+ secret:
+ secretName: {{ include "ingress-nginx.fullname" . }}-admission
+ {{- if .Values.controller.admissionWebhooks.certManager.enabled }}
+ items:
+ - key: tls.crt
+ path: cert
+ - key: tls.key
+ path: key
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.extraVolumes }}
+ {{ toYaml .Values.controller.extraVolumes | nindent 8 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-hpa.yaml b/charts/ingress-nginx/templates/controller-hpa.yaml
new file mode 100644
index 0000000..96a91f5
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-hpa.yaml
@@ -0,0 +1,47 @@
+{{- if and (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) .Values.controller.autoscaling.enabled (not .Values.controller.keda.enabled) -}}
+apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
+kind: HorizontalPodAutoscaler
+metadata:
+ {{- with .Values.controller.autoscaling.annotations }}
+ annotations: {{ toYaml . | nindent 4 }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ minReplicas: {{ .Values.controller.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }}
+ metrics:
+ {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: {{ . }}
+ {{- end }}
+ {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: {{ . }}
+ {{- end }}
+ {{- with .Values.controller.autoscalingTemplate }}
+ {{- toYaml . | nindent 2 }}
+ {{- end }}
+ {{- with .Values.controller.autoscaling.behavior }}
+ behavior:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-ingressclass.yaml b/charts/ingress-nginx/templates/controller-ingressclass.yaml
new file mode 100644
index 0000000..9492784
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-ingressclass.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.controller.ingressClassResource.enabled -}}
+# We don't support namespaced ingressClass yet
+# So a ClusterRole and a ClusterRoleBinding are required
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ .Values.controller.ingressClassResource.name }}
+{{- if .Values.controller.ingressClassResource.default }}
+ annotations:
+ ingressclass.kubernetes.io/is-default-class: "true"
+{{- end }}
+spec:
+ controller: {{ .Values.controller.ingressClassResource.controllerValue }}
+ {{ template "ingressClass.parameters" . }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-keda.yaml b/charts/ingress-nginx/templates/controller-keda.yaml
new file mode 100644
index 0000000..875157e
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-keda.yaml
@@ -0,0 +1,42 @@
+{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
+# https://keda.sh/docs/
+
+apiVersion: {{ .Values.controller.keda.apiVersion }}
+kind: ScaledObject
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ {{- if .Values.controller.keda.scaledObject.annotations }}
+ annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ scaleTargetRef:
+{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }}
+ deploymentName: {{ include "ingress-nginx.controller.fullname" . }}
+{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- end }}
+ pollingInterval: {{ .Values.controller.keda.pollingInterval }}
+ cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }}
+ minReplicaCount: {{ .Values.controller.keda.minReplicas }}
+ maxReplicaCount: {{ .Values.controller.keda.maxReplicas }}
+ triggers:
+{{- with .Values.controller.keda.triggers }}
+{{ toYaml . | indent 2 }}
+{{ end }}
+ advanced:
+ restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }}
+{{- if .Values.controller.keda.behavior }}
+ horizontalPodAutoscalerConfig:
+ behavior:
+{{ with .Values.controller.keda.behavior -}}
+{{ toYaml . | indent 8 }}
+{{ end }}
+
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/charts/ingress-nginx/templates/controller-poddisruptionbudget.yaml
new file mode 100644
index 0000000..91be580
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-poddisruptionbudget.yaml
@@ -0,0 +1,26 @@
+{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }}
+apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }}
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.controller.annotations }}
+ annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+ {{- if and .Values.controller.minAvailable (not (hasKey .Values.controller "maxUnavailable")) }}
+ minAvailable: {{ .Values.controller.minAvailable }}
+ {{- else if .Values.controller.maxUnavailable }}
+ maxUnavailable: {{ .Values.controller.maxUnavailable }}
+ {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-prometheusrules.yaml b/charts/ingress-nginx/templates/controller-prometheusrules.yaml
new file mode 100644
index 0000000..78b5362
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-prometheusrules.yaml
@@ -0,0 +1,21 @@
+{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- if .Values.controller.metrics.prometheusRule.namespace }}
+ namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.metrics.prometheusRule.additionalLabels }}
+ {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }}
+ {{- end }}
+spec:
+{{- if .Values.controller.metrics.prometheusRule.rules }}
+ groups:
+ - name: {{ template "ingress-nginx.name" . }}
+ rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }}
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-psp.yaml b/charts/ingress-nginx/templates/controller-psp.yaml
new file mode 100644
index 0000000..3c499b9
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-psp.yaml
@@ -0,0 +1,94 @@
+{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }}
+{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ allowedCapabilities:
+ - NET_BIND_SERVICE
+ {{- if .Values.controller.image.chroot }}
+ - SYS_CHROOT
+ {{- end }}
+{{- if .Values.controller.sysctls }}
+ allowedUnsafeSysctls:
+ {{- range $sysctl, $value := .Values.controller.sysctls }}
+ - {{ $sysctl }}
+ {{- end }}
+{{- end }}
+ privileged: false
+ allowPrivilegeEscalation: true
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+{{- if .Values.controller.hostNetwork }}
+ hostNetwork: {{ .Values.controller.hostNetwork }}
+{{- end }}
+{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }}
+ hostPorts:
+{{- if .Values.controller.hostNetwork }}
+{{- range $key, $value := .Values.controller.containerPort }}
+ # {{ $key }}
+ - min: {{ $value }}
+ max: {{ $value }}
+{{- end }}
+{{- else if .Values.controller.hostPort.enabled }}
+{{- range $key, $value := .Values.controller.hostPort.ports }}
+ # {{ $key }}
+ - min: {{ $value }}
+ max: {{ $value }}
+{{- end }}
+{{- end }}
+{{- if .Values.controller.metrics.enabled }}
+ # metrics
+ - min: {{ .Values.controller.metrics.port }}
+ max: {{ .Values.controller.metrics.port }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.enabled }}
+ # admission webhooks
+ - min: {{ .Values.controller.admissionWebhooks.port }}
+ max: {{ .Values.controller.admissionWebhooks.port }}
+{{- end }}
+{{- range $key, $value := .Values.tcp }}
+ # {{ $key }}-tcp
+ - min: {{ $key }}
+ max: {{ $key }}
+{{- end }}
+{{- range $key, $value := .Values.udp }}
+ # {{ $key }}-udp
+ - min: {{ $key }}
+ max: {{ $key }}
+{{- end }}
+{{- end }}
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Require the container to run without root privileges.
+ rule: 'MustRunAsNonRoot'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+ seLinux:
+ rule: 'RunAsAny'
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-role.yaml b/charts/ingress-nginx/templates/controller-role.yaml
new file mode 100644
index 0000000..d1aa9aa
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-role.yaml
@@ -0,0 +1,101 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ resourceNames:
+ - {{ include "ingress-nginx.controller.electionID" . }}
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - list
+ - watch
+ - get
+{{- if .Values.podSecurityPolicy.enabled }}
+ - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}]
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ {{- with .Values.controller.existingPsp }}
+ resourceNames: [{{ . }}]
+ {{- else }}
+ resourceNames: [{{ include "ingress-nginx.fullname" . }}]
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-rolebinding.yaml b/charts/ingress-nginx/templates/controller-rolebinding.yaml
new file mode 100644
index 0000000..e846a11
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-rolebinding.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "ingress-nginx.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "ingress-nginx.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-secret.yaml b/charts/ingress-nginx/templates/controller-secret.yaml
new file mode 100644
index 0000000..f374423
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-secret.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.dhParam -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+data:
+ dhparam.pem: {{ .Values.dhParam }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-service-internal.yaml b/charts/ingress-nginx/templates/controller-service-internal.yaml
new file mode 100644
index 0000000..87146b7
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-service-internal.yaml
@@ -0,0 +1,79 @@
+{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}}
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ {{- range $key, $value := .Values.controller.service.internal.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.service.labels }}
+ {{- toYaml .Values.controller.service.labels | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}-internal
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: "{{ .Values.controller.service.type }}"
+{{- if .Values.controller.service.internal.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.service.internal.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.service.internal.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }}
+{{- end }}
+ ports:
+ {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }}
+ {{- if .Values.controller.service.enableHttp }}
+ - name: http
+ port: {{ .Values.controller.service.internal.ports.http | default .Values.controller.service.ports.http }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.internal.targetPorts.http | default .Values.controller.service.targetPorts.http }}
+ {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
+ appProtocol: http
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.http }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.service.enableHttps }}
+ - name: https
+ port: {{ .Values.controller.service.internal.ports.https | default .Values.controller.service.ports.https }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.internal.targetPorts.https | default .Values.controller.service.targetPorts.https }}
+ {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
+ appProtocol: https
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.https }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.tcp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp
+ port: {{ $key }}
+ protocol: TCP
+ targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp
+ {{- if $.Values.controller.service.nodePorts.tcp }}
+ {{- if index $.Values.controller.service.nodePorts.tcp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.udp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp
+ port: {{ $key }}
+ protocol: UDP
+ targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp
+ {{- if $.Values.controller.service.nodePorts.udp }}
+ {{- if index $.Values.controller.service.nodePorts.udp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-service-metrics.yaml b/charts/ingress-nginx/templates/controller-service-metrics.yaml
new file mode 100644
index 0000000..b178401
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-service-metrics.yaml
@@ -0,0 +1,45 @@
+{{- if .Values.controller.metrics.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.controller.metrics.service.annotations }}
+ annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.metrics.service.labels }}
+ {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}-metrics
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.controller.metrics.service.type }}
+{{- if .Values.controller.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.controller.metrics.service.clusterIP }}
+{{- end }}
+{{- if .Values.controller.metrics.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.metrics.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.metrics.service.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }}
+{{- end }}
+ ports:
+ - name: {{ .Values.controller.metrics.portName }}
+ port: {{ .Values.controller.metrics.service.servicePort }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.metrics.portName }}
+ {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }}
+ nodePort: {{ .Values.controller.metrics.service.nodePort }}
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-service-webhook.yaml b/charts/ingress-nginx/templates/controller-service-webhook.yaml
new file mode 100644
index 0000000..2aae24f
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-service-webhook.yaml
@@ -0,0 +1,40 @@
+{{- if .Values.controller.admissionWebhooks.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.controller.admissionWebhooks.service.annotations }}
+ annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}-admission
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.controller.admissionWebhooks.service.type }}
+{{- if .Values.controller.admissionWebhooks.service.clusterIP }}
+ clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+ ports:
+ - name: https-webhook
+ port: 443
+ targetPort: webhook
+ {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
+ appProtocol: https
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-service.yaml b/charts/ingress-nginx/templates/controller-service.yaml
new file mode 100644
index 0000000..2b28196
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-service.yaml
@@ -0,0 +1,101 @@
+{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ {{- range $key, $value := .Values.controller.service.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.service.labels }}
+ {{- toYaml .Values.controller.service.labels | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.controller.service.type }}
+{{- if .Values.controller.service.clusterIP }}
+ clusterIP: {{ .Values.controller.service.clusterIP }}
+{{- end }}
+{{- if .Values.controller.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.service.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }}
+{{- end }}
+{{- if .Values.controller.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.controller.service.sessionAffinity }}
+{{- end }}
+{{- if .Values.controller.service.healthCheckNodePort }}
+ healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }}
+{{- end }}
+{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}}
+{{- if .Values.controller.service.ipFamilyPolicy }}
+ ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }}
+{{- end }}
+{{- end }}
+{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}}
+{{- if .Values.controller.service.ipFamilies }}
+ ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }}
+{{- end }}
+{{- end }}
+ ports:
+ {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }}
+ {{- if .Values.controller.service.enableHttp }}
+ - name: http
+ port: {{ .Values.controller.service.ports.http }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.targetPorts.http }}
+ {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }}
+ appProtocol: http
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.http }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.service.enableHttps }}
+ - name: https
+ port: {{ .Values.controller.service.ports.https }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.targetPorts.https }}
+ {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }}
+ appProtocol: https
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.https }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.tcp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp
+ port: {{ $key }}
+ protocol: TCP
+ targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp
+ {{- if $.Values.controller.service.nodePorts.tcp }}
+ {{- if index $.Values.controller.service.nodePorts.tcp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.udp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp
+ port: {{ $key }}
+ protocol: UDP
+ targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp
+ {{- if $.Values.controller.service.nodePorts.udp }}
+ {{- if index $.Values.controller.service.nodePorts.udp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-serviceaccount.yaml b/charts/ingress-nginx/templates/controller-serviceaccount.yaml
new file mode 100644
index 0000000..e6e776d
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-serviceaccount.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ template "ingress-nginx.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-servicemonitor.yaml b/charts/ingress-nginx/templates/controller-servicemonitor.yaml
new file mode 100644
index 0000000..8ab16f0
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- if .Values.controller.metrics.serviceMonitor.namespace }}
+ namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }}
+ {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }}
+ {{- end }}
+spec:
+ endpoints:
+ - port: {{ .Values.controller.metrics.portName }}
+ interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }}
+ {{- if .Values.controller.metrics.serviceMonitor.honorLabels }}
+ honorLabels: true
+ {{- end }}
+ {{- if .Values.controller.metrics.serviceMonitor.relabelings }}
+ relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }}
+ {{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.jobLabel }}
+ jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }}
+{{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }}
+ namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }}
+{{- else }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+{{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.targetLabels }}
+ targetLabels:
+ {{- range .Values.controller.metrics.serviceMonitor.targetLabels }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/charts/ingress-nginx/templates/controller-webhooks-networkpolicy.yaml b/charts/ingress-nginx/templates/controller-webhooks-networkpolicy.yaml
new file mode 100644
index 0000000..f74c2fb
--- /dev/null
+++ b/charts/ingress-nginx/templates/controller-webhooks-networkpolicy.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.controller.admissionWebhooks.enabled }}
+{{- if .Values.controller.admissionWebhooks.networkPolicyEnabled }}
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-webhooks-allow
+ namespace: {{ .Release.Namespace }}
+spec:
+ ingress:
+ - {}
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "ingress-nginx.name" . }}
+ policyTypes:
+ - Ingress
+
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-deployment.yaml b/charts/ingress-nginx/templates/default-backend-deployment.yaml
new file mode 100644
index 0000000..87aced4
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-deployment.yaml
@@ -0,0 +1,123 @@
+{{- if .Values.defaultBackend.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: default-backend
+{{- if not .Values.defaultBackend.autoscaling.enabled }}
+ replicas: {{ .Values.defaultBackend.replicaCount }}
+{{- end }}
+ revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
+ {{- if .Values.defaultBackend.updateStrategy }}
+ strategy:
+ {{ toYaml .Values.defaultBackend.updateStrategy | nindent 4 }}
+ {{- end }}
+ minReadySeconds: {{ .Values.defaultBackend.minReadySeconds }}
+ template:
+ metadata:
+ {{- if .Values.defaultBackend.podAnnotations }}
+ annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.podLabels }}
+ {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.priorityClassName }}
+ priorityClassName: {{ .Values.defaultBackend.priorityClassName }}
+ {{- end }}
+ {{- if .Values.defaultBackend.podSecurityContext }}
+ securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ template "ingress-nginx.name" . }}-default-backend
+ {{- with .Values.defaultBackend.image }}
+ image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }}
+ {{- if .Values.defaultBackend.extraArgs }}
+ args:
+ {{- range $key, $value := .Values.defaultBackend.extraArgs }}
+ {{- /* Accept keys without values or with false as value */}}
+ {{- if eq ($value | quote | len) 2 }}
+ - --{{ $key }}
+ {{- else }}
+ - --{{ $key }}={{ $value }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsUser: {{ .Values.defaultBackend.image.runAsUser }}
+ runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }}
+ allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }}
+            readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem }}
+ {{- if .Values.defaultBackend.extraEnvs }}
+ env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }}
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: {{ .Values.defaultBackend.port }}
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }}
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: {{ .Values.defaultBackend.port }}
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.defaultBackend.port }}
+ protocol: TCP
+ {{- if .Values.defaultBackend.extraVolumeMounts }}
+ volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.resources }}
+ resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.nodeSelector }}
+ nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
+ {{- if .Values.defaultBackend.tolerations }}
+ tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.affinity }}
+ affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 60
+ {{- if .Values.defaultBackend.extraVolumes }}
+ volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-hpa.yaml b/charts/ingress-nginx/templates/default-backend-hpa.yaml
new file mode 100644
index 0000000..faaf4fa
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-hpa.yaml
@@ -0,0 +1,40 @@
+{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }}
+apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
+kind: HorizontalPodAutoscaler
+metadata:
+ {{- with .Values.defaultBackend.autoscaling.annotations }}
+ annotations: {{ toYaml . | nindent 4 }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }}
+ metrics:
+ {{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: {{ . }}
+ {{- end }}
+ {{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/charts/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml
new file mode 100644
index 0000000..00891ce
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.defaultBackend.enabled -}}
+{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }}
+apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }}
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: default-backend
+ minAvailable: {{ .Values.defaultBackend.minAvailable }}
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-psp.yaml b/charts/ingress-nginx/templates/default-backend-psp.yaml
new file mode 100644
index 0000000..c144c8f
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-psp.yaml
@@ -0,0 +1,38 @@
+{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }}
+{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ allowPrivilegeEscalation: false
+ fsGroup:
+ ranges:
+ - max: 65535
+ min: 1
+ rule: MustRunAs
+ requiredDropCapabilities:
+ - ALL
+ runAsUser:
+ rule: MustRunAsNonRoot
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ ranges:
+ - max: 65535
+ min: 1
+ rule: MustRunAs
+ volumes:
+ - configMap
+ - emptyDir
+ - projected
+ - secret
+ - downwardAPI
+{{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-role.yaml b/charts/ingress-nginx/templates/default-backend-role.yaml
new file mode 100644
index 0000000..a2b457c
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-role.yaml
@@ -0,0 +1,22 @@
+{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}]
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ {{- with .Values.defaultBackend.existingPsp }}
+ resourceNames: [{{ . }}]
+ {{- else }}
+ resourceNames: [{{ include "ingress-nginx.fullname" . }}-backend]
+ {{- end }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-rolebinding.yaml b/charts/ingress-nginx/templates/default-backend-rolebinding.yaml
new file mode 100644
index 0000000..dbaa516
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-rolebinding.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-service.yaml b/charts/ingress-nginx/templates/default-backend-service.yaml
new file mode 100644
index 0000000..5f1d09a
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-service.yaml
@@ -0,0 +1,41 @@
+{{- if .Values.defaultBackend.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.defaultBackend.service.annotations }}
+ annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.defaultBackend.service.type }}
+{{- if .Values.defaultBackend.service.clusterIP }}
+ clusterIP: {{ .Values.defaultBackend.service.clusterIP }}
+{{- end }}
+{{- if .Values.defaultBackend.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.defaultBackend.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+ ports:
+ - name: http
+ port: {{ .Values.defaultBackend.service.servicePort }}
+ protocol: TCP
+ targetPort: http
+ {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
+ appProtocol: http
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+{{- end }}
diff --git a/charts/ingress-nginx/templates/default-backend-serviceaccount.yaml b/charts/ingress-nginx/templates/default-backend-serviceaccount.yaml
new file mode 100644
index 0000000..b45a95a
--- /dev/null
+++ b/charts/ingress-nginx/templates/default-backend-serviceaccount.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ {{- with .Values.defaultBackend.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/charts/ingress-nginx/values.yaml b/charts/ingress-nginx/values.yaml
new file mode 100644
index 0000000..7ca41e7
--- /dev/null
+++ b/charts/ingress-nginx/values.yaml
@@ -0,0 +1,899 @@
+## nginx configuration
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md
+##
+
+## Overrides for generated resource names
+# See templates/_helpers.tpl
+# nameOverride:
+# fullnameOverride:
+
+## Labels to apply to all resources
+##
+commonLabels: {}
+# scmhash: abc123
+# myLabel: aakkmd
+
+controller:
+ name: controller
+ image:
+ ## Keep false as default for now!
+ chroot: false
+ registry: registry.k8s.io
+ image: ingress-nginx/controller
+ ## for backwards compatibility consider setting the full image url via the repository value below
+ ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+ ## repository:
+ tag: "v1.8.0"
+ digest: sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3
+ digestChroot: sha256:a45e41cd2b7670adf829759878f512d4208d0aec1869dae593a0fecd09a5e49e
+ pullPolicy: IfNotPresent
+ # www-data -> uid 101
+ runAsUser: 101
+ allowPrivilegeEscalation: true
+ # -- Use an existing PSP instead of creating one
+ existingPsp: ""
+ # -- Configures the controller container name
+ containerName: controller
+ # -- Configures the ports that the nginx-controller listens on
+ containerPort:
+ http: 80
+ https: 443
+ # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
+ config: {}
+ # -- Annotations to be added to the controller config configuration configmap.
+ configAnnotations: {}
+ # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers
+ proxySetHeaders: {}
+ # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
+ addHeaders: {}
+ # -- Optionally customize the pod dnsConfig.
+ dnsConfig: {}
+ # -- Optionally customize the pod hostname.
+ hostname: {}
+ # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
+ # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
+ # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
+ dnsPolicy: ClusterFirst
+ # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
+ # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply
+ reportNodeInternalIp: false
+ # -- Process Ingress objects without ingressClass annotation/ingressClassName field
+ # Overrides value for --watch-ingress-without-class flag of the controller binary
+ # Defaults to false
+ watchIngressWithoutClass: false
+ # -- Process IngressClass per name (additionally as per spec.controller).
+ ingressClassByName: false
+ # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-aware-hints="auto"
+ # Defaults to false
+ enableTopologyAwareRouting: false
+ # -- This configuration defines if Ingress Controller should allow users to set
+ # their own *-snippet annotations, otherwise this is forbidden / dropped
+ # when users add those annotations.
+ # Global snippets in ConfigMap are still respected
+ allowSnippetAnnotations: true
+ # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
+ # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
+ # is merged
+ hostNetwork: false
+ ## Use host ports 80 and 443
+ ## Disabled by default
+ hostPort:
+ # -- Enable 'hostPort' or not
+ enabled: false
+ ports:
+ # -- 'hostPort' http port
+ http: 80
+ # -- 'hostPort' https port
+ https: 443
+ # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader'
+ electionID: ""
+ ## This section refers to the creation of the IngressClass resource
+ ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
+ ingressClassResource:
+ # -- Name of the ingressClass
+ name: nginx
+ # -- Is this ingressClass enabled or not
+ enabled: true
+ # -- Is this the default ingressClass for the cluster
+ default: false
+ # -- Controller-value of the controller that is processing this ingressClass
+ controllerValue: "k8s.io/ingress-nginx"
+ # -- Parameters is a link to a custom resource containing additional
+ # configuration for the controller. This is optional if the controller
+ # does not require extra parameters.
+ parameters: {}
+ # -- For backwards compatibility with ingress.class annotation, use ingressClass.
+ # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation
+ ingressClass: nginx
+ # -- Labels to add to the pod container metadata
+ podLabels: {}
+ # key: value
+
+ # -- Security Context policies for controller pods
+ podSecurityContext: {}
+ # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls
+ sysctls: {}
+ # sysctls:
+ # "net.core.somaxconn": "8192"
+
+ # -- Allows customization of the source of the IP address or FQDN to report
+ # in the ingress status field. By default, it reads the information provided
+ # by the service. If disable, the status field reports the IP address of the
+ # node or nodes where an ingress controller pod is running.
+ publishService:
+ # -- Enable 'publishService' or not
+ enabled: true
+ # -- Allows overriding of the publish service to bind to
+ # Must be <namespace>/<service_name>
+ pathOverride: ""
+ # Limit the scope of the controller to a specific namespace
+ scope:
+ # -- Enable 'scope' or not
+ enabled: false
+ # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE)
+ namespace: ""
+ # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels
+ # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces.
+ namespaceSelector: ""
+ # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)
+ configMapNamespace: ""
+ tcp:
+ # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)
+ configMapNamespace: ""
+ # -- Annotations to be added to the tcp config configmap
+ annotations: {}
+ udp:
+ # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)
+ configMapNamespace: ""
+ # -- Annotations to be added to the udp config configmap
+ annotations: {}
+ # -- Maxmind license key to download GeoLite2 Databases.
+ ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
+ maxmindLicenseKey: ""
+ # -- Additional command line arguments to pass to Ingress-Nginx Controller
+ # E.g. to specify the default SSL certificate you can use
+ extraArgs: {}
+ ## extraArgs:
+ ## default-ssl-certificate: "<namespace>/<secret_name>"
+
+ # -- Additional environment variables to set
+ extraEnvs: []
+ # extraEnvs:
+ # - name: FOO
+ # valueFrom:
+ # secretKeyRef:
+ # key: FOO
+ # name: secret-resource
+
+ # -- Use a `DaemonSet` or `Deployment`
+ kind: Deployment
+ # -- Annotations to be added to the controller Deployment or DaemonSet
+ ##
+ annotations: {}
+ # keel.sh/pollSchedule: "@every 60m"
+
+ # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels
+ ##
+ labels: {}
+ # keel.sh/policy: patch
+ # keel.sh/trigger: poll
+
+ # -- The update strategy to apply to the Deployment or DaemonSet
+ ##
+ updateStrategy: {}
+ # rollingUpdate:
+ # maxUnavailable: 1
+ # type: RollingUpdate
+
+ # -- `minReadySeconds` to avoid killing pods before we are ready
+ ##
+ minReadySeconds: 0
+ # -- Node tolerations for server scheduling to nodes with taints
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Affinity and anti-affinity rules for server scheduling to nodes
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ##
+ affinity: {}
+ # # An example of preferred pod anti-affinity, weight is in the range 1-100
+ # podAntiAffinity:
+ # preferredDuringSchedulingIgnoredDuringExecution:
+ # - weight: 100
+ # podAffinityTerm:
+ # labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/instance
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/component
+ # operator: In
+ # values:
+ # - controller
+ # topologyKey: kubernetes.io/hostname
+
+ # # An example of required pod anti-affinity
+ # podAntiAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # - labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/instance
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/component
+ # operator: In
+ # values:
+ # - controller
+ # topologyKey: "kubernetes.io/hostname"
+
+ # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ##
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app.kubernetes.io/instance: ingress-nginx-internal
+
+ # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready
+ ## wait up to five minutes for the drain of connections
+ ##
+ terminationGracePeriodSeconds: 300
+ # -- Node labels for controller pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector:
+ kubernetes.io/os: linux
+ ## Liveness and readiness probe values
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+ ##
+ ## startupProbe:
+ ## httpGet:
+ ## # should match container.healthCheckPath
+ ## path: "/healthz"
+ ## port: 10254
+ ## scheme: HTTP
+ ## initialDelaySeconds: 5
+ ## periodSeconds: 5
+ ## timeoutSeconds: 2
+ ## successThreshold: 1
+ ## failureThreshold: 5
+ livenessProbe:
+ httpGet:
+ # should match container.healthCheckPath
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ # should match container.healthCheckPath
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+ # -- Path of the health check endpoint. All requests received on the port defined by
+ # the healthz-port parameter are forwarded internally to this path.
+ healthCheckPath: "/healthz"
+ # -- Address to bind the health check endpoint.
+ # It is better to set this option to the internal node address
+ # if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode.
+ healthCheckHost: ""
+ # -- Annotations to be added to controller pods
+ ##
+ podAnnotations: {}
+ replicaCount: 1
+ # -- Minimum available pods set in PodDisruptionBudget.
+ # Define either 'minAvailable' or 'maxUnavailable', never both.
+ minAvailable: 1
+ # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored.
+ # maxUnavailable: 1
+
+ ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes
+ ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
+ ## Ideally, there should be no limits.
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
+ resources:
+ ## limits:
+ ## cpu: 100m
+ ## memory: 90Mi
+ requests:
+ cpu: 100m
+ memory: 90Mi
+ # Mutually exclusive with keda autoscaling
+ autoscaling:
+ enabled: false
+ annotations: {}
+ minReplicas: 1
+ maxReplicas: 11
+ targetCPUUtilizationPercentage: 50
+ targetMemoryUtilizationPercentage: 50
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+ autoscalingTemplate: []
+ # Custom or additional autoscaling metrics
+ # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
+ # - type: Pods
+ # pods:
+ # metric:
+ # name: nginx_ingress_controller_nginx_process_requests_total
+ # target:
+ # type: AverageValue
+ # averageValue: 10000m
+
+ # Mutually exclusive with hpa autoscaling
+ keda:
+ apiVersion: "keda.sh/v1alpha1"
+ ## apiVersion changes with keda 1.x vs 2.x
+ ## 2.x = keda.sh/v1alpha1
+ ## 1.x = keda.k8s.io/v1alpha1
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 11
+ pollingInterval: 30
+ cooldownPeriod: 300
+ restoreToOriginalReplicaCount: false
+ scaledObject:
+ annotations: {}
+ # Custom annotations for ScaledObject resource
+ # annotations:
+ # key: value
+ triggers: []
+ # - type: prometheus
+ # metadata:
+ # serverAddress: http://<prometheus-host>:9090
+ # metricName: http_requests_total
+ # threshold: '100'
+ # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))
+
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+ # -- Enable mimalloc as a drop-in replacement for malloc.
+ ## ref: https://github.com/microsoft/mimalloc
+ ##
+ enableMimalloc: true
+ ## Override NGINX template
+ customTemplate:
+ configMapName: ""
+ configMapKey: ""
+ service:
+ enabled: true
+ # -- If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces annotations that were
+ # used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
+ # It allows choosing the protocol for each backend specified in the Kubernetes service.
+ # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244
+ # Will be ignored for Kubernetes versions older than 1.20
+ ##
+ appProtocol: true
+ annotations: {}
+ labels: {}
+ # clusterIP: ""
+
+ # -- List of IP addresses at which the controller services are available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+ # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ enableHttp: true
+ enableHttps: true
+ ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it.
+ ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
+ # externalTrafficPolicy: ""
+
+ ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
+ ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ # sessionAffinity: ""
+
+ ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified,
+ ## the service controller allocates a port from your cluster’s NodePort range.
+ ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ # healthCheckNodePort: 0
+
+ # -- Represents the dual-stack-ness requested or required by this Service. Possible values are
+ # SingleStack, PreferDualStack or RequireDualStack.
+ # The ipFamilies and clusterIPs fields depend on the value of this field.
+ ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
+ ipFamilyPolicy: "SingleStack"
+ # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically
+ # based on cluster configuration and the ipFamilyPolicy field.
+ ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
+ ipFamilies:
+ - IPv4
+ ports:
+ http: 80
+ https: 443
+ targetPorts:
+ http: http
+ https: https
+ type: LoadBalancer
+ ## type: NodePort
+ ## nodePorts:
+ ## http: 32080
+ ## https: 32443
+ ## tcp:
+ ## 8080: 32808
+ nodePorts:
+ http: ""
+ https: ""
+ tcp: {}
+ udp: {}
+ external:
+ enabled: true
+ internal:
+ # -- Enables an additional internal load balancer (besides the external one).
+ enabled: false
+ # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
+ annotations: {}
+ # loadBalancerIP: ""
+
+ # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
+ loadBalancerSourceRanges: []
+ ## Set external traffic policy to: "Local" to preserve source IP on
+ ## providers supporting it
+ ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
+ # externalTrafficPolicy: ""
+
+ # -- Custom port mapping for internal service
+ ports: {}
+ # http: 80
+ # https: 443
+
+ # -- Custom target port mapping for internal service
+ targetPorts: {}
+ # http: http
+ # https: https
+ # shareProcessNamespace enables process namespace sharing within the pod.
+ # This can be used for example to signal log rotation using `kill -USR1` from a sidecar.
+ shareProcessNamespace: false
+ # -- Additional containers to be added to the controller pod.
+ # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
+ extraContainers: []
+ # - name: my-sidecar
+ # image: nginx:latest
+ # - name: lemonldap-ng-controller
+ # image: lemonldapng/lemonldap-ng-controller:0.2.0
+ # args:
+ # - /lemonldap-ng-controller
+ # - --alsologtostderr
+ # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
+ # env:
+ # - name: POD_NAME
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.name
+ # - name: POD_NAMESPACE
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.namespace
+ # volumeMounts:
+ # - name: copy-portal-skins
+ # mountPath: /srv/var/lib/lemonldap-ng/portal/skins
+
+ # -- Additional volumeMounts to the controller main container.
+ extraVolumeMounts: []
+ # - name: copy-portal-skins
+ # mountPath: /var/lib/lemonldap-ng/portal/skins
+
+ # -- Additional volumes to the controller pod.
+ extraVolumes: []
+ # - name: copy-portal-skins
+ # emptyDir: {}
+
+ # -- Containers, which are run before the app containers are started.
+ extraInitContainers: []
+ # - name: init-myservice
+ # image: busybox
+ # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
+
+ # -- Modules, which are mounted into the core nginx image. See values.yaml for a sample to add opentelemetry module
+ extraModules: []
+ # - name: mytestmodule
+ # image: registry.k8s.io/ingress-nginx/mytestmodule
+ # containerSecurityContext:
+ # allowPrivilegeEscalation: false
+ #
+ # The image must contain a `/usr/local/bin/init_module.sh` executable, which
+ # will be executed as initContainers, to move its config files within the
+ # mounted volume.
+
+ opentelemetry:
+ enabled: false
+ image: registry.k8s.io/ingress-nginx/opentelemetry:v20230527@sha256:fd7ec835f31b7b37187238eb4fdad4438806e69f413a203796263131f4f02ed0
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ admissionWebhooks:
+ annotations: {}
+ # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem".
+
+ ## Additional annotations to the admission webhooks.
+ ## These annotations will be added to the ValidatingWebhookConfiguration and
+ ## the Jobs Spec of the admission webhooks.
+ enabled: true
+ # -- Additional environment variables to set
+ extraEnvs: []
+ # extraEnvs:
+ # - name: FOO
+ # valueFrom:
+ # secretKeyRef:
+ # key: FOO
+ # name: secret-resource
+ # -- Admission Webhook failure policy to use
+ failurePolicy: Fail
+ # timeoutSeconds: 10
+ port: 8443
+ certificate: "/usr/local/certificates/cert"
+ key: "/usr/local/certificates/key"
+ namespaceSelector: {}
+ objectSelector: {}
+ # -- Labels to be added to admission webhooks
+ labels: {}
+ # -- Use an existing PSP instead of creating one
+ existingPsp: ""
+ networkPolicyEnabled: false
+ service:
+ annotations: {}
+ # clusterIP: ""
+ externalIPs: []
+ # loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ servicePort: 443
+ type: ClusterIP
+ createSecretJob:
+ securityContext:
+ allowPrivilegeEscalation: false
+ resources: {}
+ # limits:
+ # cpu: 10m
+ # memory: 20Mi
+ # requests:
+ # cpu: 10m
+ # memory: 20Mi
+ patchWebhookJob:
+ securityContext:
+ allowPrivilegeEscalation: false
+ resources: {}
+ patch:
+ enabled: true
+ image:
+ registry: registry.k8s.io
+ image: ingress-nginx/kube-webhook-certgen
+ ## for backwards compatibility consider setting the full image url via the repository value below
+ ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+ ## repository:
+ tag: v20230407
+ digest: sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
+ pullPolicy: IfNotPresent
+ # -- Provide a priority class name to the webhook patching job
+ ##
+ priorityClassName: ""
+ podAnnotations: {}
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations: []
+ # -- Labels to be added to patch job resources
+ labels: {}
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 2000
+ fsGroup: 2000
+ # Use certmanager to generate webhook certs
+ certManager:
+ enabled: false
+ # self-signed root certificate
+ rootCert:
+ # default to be 5y
+ duration: ""
+ admissionCert:
+ # default to be 1y
+ duration: ""
+ # issuerRef:
+ # name: "issuer"
+ # kind: "ClusterIssuer"
+ metrics:
+ port: 10254
+ portName: metrics
+ # if this port is changed, change healthz-port: in extraArgs: accordingly
+ enabled: false
+ service:
+ annotations: {}
+ # prometheus.io/scrape: "true"
+ # prometheus.io/port: "10254"
+ # -- Labels to be added to the metrics service resource
+ labels: {}
+ # clusterIP: ""
+
+ # -- List of IP addresses at which the stats-exporter service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+ # loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ servicePort: 10254
+ type: ClusterIP
+ # externalTrafficPolicy: ""
+ # nodePort: ""
+ serviceMonitor:
+ enabled: false
+ additionalLabels: {}
+ ## The label to use to retrieve the job name from.
+ ## jobLabel: "app.kubernetes.io/name"
+ namespace: ""
+ namespaceSelector: {}
+ ## Default: scrape .Release.Namespace only
+ ## To scrape all, use the following:
+ ## namespaceSelector:
+ ## any: true
+ scrapeInterval: 30s
+ # honorLabels: true
+ targetLabels: []
+ relabelings: []
+ metricRelabelings: []
+ prometheusRule:
+ enabled: false
+ additionalLabels: {}
+ # namespace: ""
+ rules: []
+ # # These are just examples rules, please adapt them to your needs
+ # - alert: NGINXConfigFailed
+ # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
+ # for: 1s
+ # labels:
+ # severity: critical
+ # annotations:
+ # description: bad ingress config - nginx config test failed
+ # summary: uninstall the latest ingress changes to allow config reloads to resume
+ # - alert: NGINXCertificateExpiry
+ # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
+ # for: 1s
+ # labels:
+ # severity: critical
+ # annotations:
+ # description: ssl certificate(s) will expire in less then a week
+ # summary: renew expiring certificates to avoid downtime
+ # - alert: NGINXTooMany500s
+ # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+ # for: 1m
+ # labels:
+ # severity: warning
+ # annotations:
+ # description: Too many 5XXs
+ # summary: More than 5% of all requests returned 5XX, this requires your attention
+ # - alert: NGINXTooMany400s
+ # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+ # for: 1m
+ # labels:
+ # severity: warning
+ # annotations:
+ # description: Too many 4XXs
+ # summary: More than 5% of all requests returned 4XX, this requires your attention
+ # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
+ # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
+ # to 300, allowing the draining of connections up to five minutes.
+ # If the active connections end before that, the pod will terminate gracefully at that time.
+ # To effectively take advantage of this feature, the Configmap feature
+ # worker-shutdown-timeout new value is 240s instead of 10s.
+ ##
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /wait-shutdown
+ priorityClassName: ""
+# -- Rollback limit
+##
+revisionHistoryLimit: 10
+## Default 404 backend
+##
+defaultBackend:
+ ##
+ enabled: false
+ name: defaultbackend
+ image:
+ registry: registry.k8s.io
+ image: defaultbackend-amd64
+ ## for backwards compatibility consider setting the full image url via the repository value below
+ ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+ ## repository:
+ tag: "1.5"
+ pullPolicy: IfNotPresent
+ # nobody user -> uid 65534
+ runAsUser: 65534
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ # -- Use an existing PSP instead of creating one
+ existingPsp: ""
+ extraArgs: {}
+ serviceAccount:
+ create: true
+ name: ""
+ automountServiceAccountToken: true
+ # -- Additional environment variables to set for defaultBackend pods
+ extraEnvs: []
+ port: 8080
+ ## Readiness and liveness probes for default backend
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ ##
+ livenessProbe:
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 6
+ initialDelaySeconds: 0
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 5
+ # -- The update strategy to apply to the Deployment or DaemonSet
+ ##
+ updateStrategy: {}
+ # rollingUpdate:
+ # maxUnavailable: 1
+ # type: RollingUpdate
+
+ # -- `minReadySeconds` to avoid killing pods before we are ready
+ ##
+ minReadySeconds: 0
+ # -- Node tolerations for server scheduling to nodes with taints
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ affinity: {}
+ # -- Security Context policies for controller pods
+ # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
+ # notes on enabling and using sysctls
+ ##
+ podSecurityContext: {}
+ # -- Security Context policies for controller main container.
+ # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
+ # notes on enabling and using sysctls
+ ##
+ containerSecurityContext: {}
+ # -- Labels to add to the pod container metadata
+ podLabels: {}
+ # key: value
+
+ # -- Node labels for default backend pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector:
+ kubernetes.io/os: linux
+ # -- Annotations to be added to default backend pods
+ ##
+ podAnnotations: {}
+ replicaCount: 1
+ minAvailable: 1
+ resources: {}
+ # limits:
+ # cpu: 10m
+ # memory: 20Mi
+ # requests:
+ # cpu: 10m
+ # memory: 20Mi
+
+ extraVolumeMounts: []
+ ## Additional volumeMounts to the default backend container.
+ # - name: copy-portal-skins
+ # mountPath: /var/lib/lemonldap-ng/portal/skins
+
+ extraVolumes: []
+ ## Additional volumes to the default backend pod.
+ # - name: copy-portal-skins
+ # emptyDir: {}
+
+ autoscaling:
+ annotations: {}
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 2
+ targetCPUUtilizationPercentage: 50
+ targetMemoryUtilizationPercentage: 50
+ service:
+ annotations: {}
+ # clusterIP: ""
+
+ # -- List of IP addresses at which the default backend service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+ # loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ servicePort: 80
+ type: ClusterIP
+ priorityClassName: ""
+ # -- Labels to be added to the default backend resources
+ labels: {}
+## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
+rbac:
+ create: true
+ scope: false
+## If true, create & use Pod Security Policy resources
+## https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+podSecurityPolicy:
+ enabled: false
+serviceAccount:
+ create: true
+ name: ""
+ automountServiceAccountToken: true
+ # -- Annotations for the controller service account
+ annotations: {}
+# -- Optional array of imagePullSecrets containing private registry credentials
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# - name: secretName
+
+# -- TCP service key-value pairs
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
+##
+tcp: {}
+# 8080: "default/example-tcp-svc:9000"
+
+# -- UDP service key-value pairs
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
+##
+udp: {}
+# 53: "kube-system/kube-dns:53"
+
+# -- Prefix for TCP and UDP ports names in ingress controller service
+## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration
+portNamePrefix: ""
+# -- (string) A base64-encoded Diffie-Hellman parameter.
+# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64`
+## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param
+dhParam: ""
diff --git a/charts/ingress/.helmignore b/charts/ingress/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/ingress/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/ingress/Chart.yaml b/charts/ingress/Chart.yaml
new file mode 100644
index 0000000..28df2c4
--- /dev/null
+++ b/charts/ingress/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: rpuppy
+description: A Helm chart to configure ingress
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/ingress/templates/install.yaml b/charts/ingress/templates/install.yaml
new file mode 100644
index 0000000..c50a741
--- /dev/null
+++ b/charts/ingress/templates/install.yaml
@@ -0,0 +1,39 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress-{{ .Values.domain }}
+ namespace: {{ .Release.Namespace }}
+ {{- if or .Values.certificateIssuer .Values.appRoot }}
+ annotations:
+ {{- if .Values.certificateIssuer }}
+ acme.cert-manager.io/http01-edit-in-place: "true"
+ cert-manager.io/cluster-issuer: {{ .Values.certificateIssuer }}
+ # nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+ {{- end }}
+ {{- if .Values.appRoot }}
+ nginx.ingress.kubernetes.io/app-root: {{ .Values.appRoot }}
+ {{- end }}
+ {{- end }}
+spec:
+ ingressClassName: {{ .Values.ingressClassName }}
+ {{- if .Values.certificateIssuer }}
+ tls:
+ - hosts:
+ - {{ .Values.domain }}
+ secretName: cert-{{ .Values.domain }}
+ {{- end }}
+ rules:
+ - host: {{ .Values.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: {{ .Values.service.name }}
+ port:
+ {{- if .Values.service.port.name }}
+ name: {{ .Values.service.port.name }}
+ {{- else }}
+ number: {{ .Values.service.port.number }}
+ {{- end}}
diff --git a/charts/ingress/values.yaml b/charts/ingress/values.yaml
new file mode 100644
index 0000000..0640557
--- /dev/null
+++ b/charts/ingress/values.yaml
@@ -0,0 +1,9 @@
+ingressClassName: ingress-public
+certificateIssuer: example-public
+domain: woof.example.com
+appRoot: ""
+service:
+ name: woof
+ port:
+ number: 80
+ name: ""
diff --git a/charts/jellyfin/.helmignore b/charts/jellyfin/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/jellyfin/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/jellyfin/Chart.yaml b/charts/jellyfin/Chart.yaml
new file mode 100644
index 0000000..e0ba9ca
--- /dev/null
+++ b/charts/jellyfin/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: jellyfin
+description: A Helm chart to deploy Jellyfin media server on PCloud
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/jellyfin/templates/deploy.yaml b/charts/jellyfin/templates/deploy.yaml
new file mode 100644
index 0000000..f98a56e
--- /dev/null
+++ b/charts/jellyfin/templates/deploy.yaml
@@ -0,0 +1,138 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: jellyfin
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: jellyfin
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+spec:
+ ingressClassName: {{ .Values.ingress.className }}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.domain }}
+ rules:
+ - host: {{ .Values.ingress.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: jellyfin
+ port:
+ name: http
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: jellyfin
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: jellyfin
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: jellyfin
+ spec:
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: qbittorrent-data
+ - name: config
+ persistentVolumeClaim:
+ claimName: jellyfin-config
+ - name: cache
+ persistentVolumeClaim:
+ claimName: jellyfin-cache
+ containers:
+ - name: jellyfin
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ ports:
+ - name: http
+ containerPort: 8096
+ protocol: TCP
+ volumeMounts:
+ - name: data
+ mountPath: /data/media
+ readOnly: true
+ - name: config
+ mountPath: /config
+ readOnly: false
+ - name: cache
+ mountPath: /cache
+ readOnly: false
+ resources:
+ requests:
+ # memory: "10Mi"
+ cpu: "2500m"
+ # limits:
+ # memory: "20Mi"
+ # cpu: "100m"
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: qbittorrent-data
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 10Gi
+ volumeName: qbittorrent-data
+ storageClassName: ""
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: jellyfin-config
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.configSize }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: jellyfin-cache
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ # Use the dedicated cache size; values.yaml defines storage.cacheSize
+ # separately from storage.configSize (which sizes jellyfin-config).
+ storage: {{ .Values.storage.cacheSize }}
diff --git a/charts/jellyfin/templates/pv.yaml b/charts/jellyfin/templates/pv.yaml
new file mode 100644
index 0000000..311170f
--- /dev/null
+++ b/charts/jellyfin/templates/pv.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: qbittorrent-data
+spec:
+ capacity:
+ storage: 1000Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ mountOptions:
+ - dir_mode=0777
+ - file_mode=0777
+ - vers=3.0
+ csi:
+ driver: smb.csi.k8s.io
+ readOnly: false
+ volumeHandle: {{ .Values.pcloudInstanceId }}-qbittorrent-data
+ volumeAttributes:
+ source: "//samba.{{ .Values.namespacePrefix }}app-torrent.svc.cluster.local/share"
+ nodeStageSecretRef:
+ name: qbittorrent-samba-creds
+ namespace: {{ .Release.Namespace }}
diff --git a/charts/jellyfin/values.yaml b/charts/jellyfin/values.yaml
new file mode 100644
index 0000000..349d625
--- /dev/null
+++ b/charts/jellyfin/values.yaml
@@ -0,0 +1,11 @@
+pcloudInstanceId: example
+image:
+ repository: jellyfin/jellyfin
+ tag: 10.8.10
+ pullPolicy: IfNotPresent
+ingress:
+ className: example-ingress-private
+ domain: jellyfin.p.example.com
+storage:
+ configSize: 10Gi
+ cacheSize: 20Gi
diff --git a/charts/jenkins/CHANGELOG.md b/charts/jenkins/CHANGELOG.md
new file mode 100644
index 0000000..7fb10f6
--- /dev/null
+++ b/charts/jenkins/CHANGELOG.md
@@ -0,0 +1,2873 @@
+# Changelog
+
+This file documents all notable changes to the Jenkins Helm Chart.
+The release numbering uses [semantic versioning](http://semver.org).
+
+Use the following links to reference issues, PRs, and commits prior to v2.6.0.
+
+* Issue: `https://github.com/helm/charts/issues/[issue#]`
+* PR: `https://github.com/helm/charts/pull/[pr#]`
+* Commit: `https://github.com/helm/charts/commit/[commit]/stable/jenkins`
+
+The changelog until v1.5.7 was auto-generated based on git commits.
+Those entries include a reference to the git commit to be able to get more details.
+
+## 5.1.5
+
+Fix Prometheus controller name.
+
+## 5.1.4
+
+Update `docker.io/bats/bats` to version `1.11.0`
+
+## 5.1.3
+
+Update `jenkins/jenkins` to version `2.440.2-jdk17`
+
+## 5.1.2
+
+Update `kubernetes` to version `4203.v1dd44f5b_1cf9`
+
+## 5.1.1
+
+Update `kubernetes` to version `4199.va_1647c280eb_2`
+
+## 5.1.0
+
+Add `agent.restrictedPssSecurityContext` to automatically inject in the jnlp container a securityContext that is suitable for the use of the restricted Pod Security Standard
+
+## 5.0.20
+
+Update `docker.io/kiwigrid/k8s-sidecar` to version `1.26.1`
+
+## 5.0.19
+
+Introduced helm-docs to automatically generate `values.yaml` documentation.
+
+## 5.0.18
+
+Update `kubernetes` to version `4193.vded98e56cc25`
+
+## 5.0.17
+
+Update `docker.io/kiwigrid/k8s-sidecar` to version `1.26.0`
+
+## 5.0.16
+
+Enable support for deleting plugin configuration files at startup.
+
+## 5.0.15
+
+Fixed changelog entries for previous version bumps
+
+
+## 5.0.14
+
+Update `jenkins/jenkins` to version `2.440.1-jdk17`
+
+## 5.0.13
+
+Update `docker.io/kiwigrid/k8s-sidecar` to version `1.25.4`
+
+## 5.0.12
+
+Fix controller.sidecars.additionalSidecarContainers renaming and add tests
+
+## 5.0.11
+
+* Add controller.sidecars.configAutoReload.scheme to specify protocol scheme when connecting Jenkins configuration-as-code reload endpoint
+* Add controller.sidecars.configAutoReload.skipTlsVerify to force the k8s-sidecar container to skip TLS verification when connecting to an HTTPS Jenkins configuration-as-code reload endpoint
+
+## 5.0.10
+
+Update `jenkins/inbound-agent` to version `3206.vb_15dcf73f6a_9-3`
+
+## 5.0.9
+
+Update `kubernetes` to version `4186.v1d804571d5d4`
+
+## 5.0.8
+
+Update `configuration-as-code` to version `1775.v810dc950b_514`
+
+## 5.0.7
+
+Update `docker.io/kiwigrid/k8s-sidecar` to version `docker.io/kiwigrid/k8s-sidecar`
+
+## 5.0.6
+
+Removed `docker.io` prefix from inbound-agent image
+
+## 5.0.5
+
+Prefixed artifacthub.io/images with `docker.io`
+
+## 5.0.4
+
+Updated super-linter to v6. Updated README.md and CHANGELOG.md to fix linting issues.
+
+## 5.0.2
+
+Update `git` to version `5.2.1`
+
+## 5.0.1
+
+Update `docker.io/bats/bats` to version `v1.10.0`
+
+## 5.0.0
+
+ > [!CAUTION]
+ > Several fields have been renamed or removed. See [UPGRADING.md](./UPGRADING.md#to-500)
+
+The Helm Chart is now updated automatically via [Renovate](https://docs.renovatebot.com/)
+
+## 4.12.1
+
+Update Jenkins image and appVersion to jenkins lts release version 2.426.3
+
+## 4.12.0
+
+Add support for [generic ephemeral storage](https://github.com/jenkinsci/kubernetes-plugin/pull/1489) in `agent.volumes` and `agents.workspaceVolume`.
+
+| plugin | old version | new version |
+|------------|---------------------|--------------------|
+| kubernetes | 4029.v5712230ccb_f8 | 4174.v4230d0ccd951 |
+
+## 4.11.2
+
+Fixed documentation for controller.initScripts.
+
+## 4.11.1
+
+Updated helm-unittest and made unittests compatible.
+
+## 4.11.0
+
+Add multi-cloud support.
+
+## 4.10.0
+
+Bumped Jenkins inbound agent from 3107.v665000b_51092-15 to 3192.v713e3b_039fb_e-5.
+
+## 4.9.2
+
+Update Jenkins image and appVersion to jenkins lts release version 2.426.2
+
+
+Notes about [Artifact Hub](https://artifacthub.io/packages/helm/jenkinsci/jenkins?modal=changelog) changelog processing:
+- Remove empty lines
+- Keep only ASCII characters (no emojis)
+- One change per line
+- Remove table(s) (lines starting by "|")
+- Backticks aren't rendered on artifacthub.io changelog
+
+## 4.9.1
+
+Restore artifact hub notes location in CHANGELOG.md
+
+## 4.9.0
+
+Update base images from JDK 11 to JDK 17.
+
+## 4.8.6
+
+Proper `artifacthub.io/changes` changelog annotation preprocessing.
+
+## 4.8.5
+
+Fix `artifacthub.io/changes` changelog annotation added to the released chart.
+
+## 4.8.4
+
+Add `artifacthub.io/changes` changelog annotation to the released chart.
+
+## 4.8.3
+
+Update Jenkins image and appVersion to jenkins lts release version 2.426.1
+
+## 4.8.2
+
+Add the ability to modify `retentionTimeout` and `waitForPodSec` default value in JCasC
+
+## 4.8.1
+
+Reintroduces changes from 4.7.0 (reverted in 4.7.1), with additional fixes:
+
+- METHOD is now allowed in `env` and is not duplicated anymore
+- No calls to JCasC reload endpoint from the init container
+
+## 4.8.0
+
+Adds support for ephemeralStorage request and limit in Kubernetes plugin JCasC template
+
+## 4.7.4
+
+Add the config-init-script checksum into the controller statefullset pod annotations to trigger restart of the pod in case of updated init scripts.
+
+## 4.7.3
+
+Update Jenkins image and appVersion to jenkins lts release version 2.414.3
+
+## 4.7.1
+
+Changes in 4.7.0 were reverted.
+
+## 4.7.0
+
+Runs `config-reload` as an init container, in addition to the sidecar container, to ensure that JCasC YAMLS are present before the main Jenkins container starts. This should fix some race conditions and crashes on startup.
+
+## 4.6.7
+
+Change jenkins-test image label to match the other jenkins images
+
+## 4.6.5
+
+Update Jenkins image and appVersion to jenkins lts release version 2.414.2
+
+## 4.6.4
+
+Introducing TPL function on variables related to hostname in `./charts/jenkins/templates/jenkins-controller-ingress.yaml`
+
+## 4.6.3
+
+Add values to documentation
+
+## 4.6.2
+
+Update word from hundreds to over 1800 to align with blurb at <https://github.com/jenkinsci/>.
+
+## 4.6.1
+
+Update `configuration-as-code` plugin to fix dependency issues with `azure-ad` plugin
+
+## 4.6.0
+
+Added `.Values.controller.httpsKeyStore.jenkinsHttpsJksSecretKey` to allow overriding the default secret key containing the JKS file.
+Added `.Values.controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretName` to allow getting the JKS password from a different secret.
+Added `.Values.controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretKey` to allow overriding the default secret key containing the JKS password.
+
+## 4.5.1
+
+Update Jenkins image and appVersion to jenkins lts release version 2.414.1
+
+
+## 4.5.0
+
+Added `.Values.persistence.dataSource` to allow cloning home PVC from existing dataSource.
+
+## 4.4.2
+
+Update Jenkins image and appVersion to jenkins lts release version 2.401.3
+
+
+## 4.4.1
+
+Added `.Values.agent.jnlpregistry` to allow agents to be configured with private registry.
+
+## 4.4.0
+
+Add config keys for liveness probes on agent containers.
+
+
+## 4.3.30
+
+Update Jenkins version in controller test matching LTS version
+
+## 4.3.29
+
+Update Jenkins image and appVersion to jenkins lts release version 2.401.2
+
+
+## 4.3.28
+
+Allow the kubernetes API server URL to be configurable.
+
+## 4.3.27
+
+Bump kiwigrid/k8s-sidecar from 1.23.1 to 1.24.4 and jenkins/inbound-agent from 3107.v665000b_51092-5 to 3107.v665000b_51092-15.
+
+## 4.3.26
+
+Fix various typos in the chart documentation.
+
+## 4.3.25
+
+| plugin | old version | new version |
+|-----------------------|----------------------|-----------------------|
+| kubernetes | 3900.va_dce992317b_4 | 3937.vd7b_82db_e347b_ |
+| configuration-as-code | 1625.v27444588cc3d | 1647.ve39ca_b_829b_42 |
+| git | 5.0.0 | 5.1.0 |
+| ldap | 671.v2a_9192a_7419d | 682.v7b_544c9d1512 |
+
+## 4.3.24
+
+Update Jenkins image and appVersion to jenkins lts release version 2.401.1
+
+
+## 4.3.23
+
+Update Jenkins image and appVersion to jenkins lts release version 2.387.3
+
+
+## 4.3.22
+
+
+Bump chart version.
+
+## 4.3.21
+
+
+Document building charts for weekly releases.
+
+## 4.3.20
+
+
+Enhance repository appearance and miscellaneous cleanup.
+
+## 4.3.19
+
+
+Comply with superlinter rules and address ShellCheck issues.
+
+## 4.3.18
+
+
+Bump kiwigrid/k8s-sidecar from 1.15.0 to 1.23.1.
+
+## 4.3.17
+
+
+Bump jenkins/inbound-agent from 4.11.2-4 to 3107.v665000b_51092-5.
+
+## 4.3.16
+
+
+Update bundled plugins:
+- [ldap](https://plugins.jenkins.io/ldap/): From 2.5 to 671.v2a_9192a_7419d
+- [kubernetes](https://plugins.jenkins.io/kubernetes/): From 3734.v562b_b_a_627ea_c to 3900.va_dce992317b_4
+- [workflow-aggregator](https://plugins.jenkins.io/workflow-aggregator/): From 590.v6a_d052e5a_a_b_5 to 590.v6a_d052e5a_a_b_5
+- [configuration-as-code](https://plugins.jenkins.io/configuration-as-code/): From 1569.vb_72405b_80249 to 1625.v27444588cc3d
+
+## 4.3.15
+
+
+Update bats from 1.2.1 to 1.9.0.
+
+## 4.3.14
+
+
+Update various GH actions, typo fixes, and miscellaneous chores.
+
+## 4.3.13
+
+
+Bump helm-unittest from 0.2.8 to 0.2.11.
+
+## 4.3.12
+
+
+Update wording in values.yml.
+
+## 4.3.11
+
+Update Jenkins image and appVersion to jenkins lts release version 2.387.2
+
+
+## 4.3.10
+
+Correct incorrect env var definition
+Disable volume mount if disableSecretMount enabled
+
+## 4.3.9
+
+Document `.Values.agent.directConnection` in README.
+Add default value for `.Values.agent.directConnection` to `values.yaml`
+
+## 4.3.8
+
+Added `.Values.agent.directConnection` to allow agents to be configured to connect direct to the JNLP port on the
+controller, preventing the need for an external HTTP endpoint for this purpose.
+
+## 4.3.7
+
+Added `.Values.controller.shareProcessNamespace` and `.Values.controller.httpsKeyStore.disableSecretMount` to enable sourcing TLS certs from external issuers
+
+## 4.3.6
+
+Update Jenkins image and appVersion to jenkins lts release version 2.387.1
+
+## 4.3.5
+
+Added `.Values.helmtest.bats.image` and `.Values.helmtest.bats.image` to allow unit tests to be configurable. Fixes [https://github.com/jenkinsci/helm-charts/issues/683]
+
+## 4.3.4
+
+Update Jenkins image and appVersion to jenkins lts release version 2.375.3
+
+
+## 4.3.3
+
+Removed hardcoding of chart version in tests to make maintenance easier
+
+## 4.3.2
+
+Added `.Values.serviceAccount.extraLabels` on Service Account
+Added `.Values.serviceAccountAgent.extraLabels` on Agent's Service Account
+
+
+## 4.3.0
+
+Moved use of `.Values.containerEnv` within `jenkins` Container to top of `env` block to allow for subsequent Environment Variables to reference these additional ones.
+
+## 4.2.21
+
+Update Jenkins image and appVersion to jenkins lts release version 2.375.2
+
+
+## 4.2.20
+
+Fixed the `controller.prometheus.metricRelabelings` being unable to convert the value to the ServiceMonitor.
+Added `controller.prometheus.relabelings` to allow relabling before scrape.
+Added default values for `controller.prometheus.relabelings` and `controller.prometheus.metricRelabelings`.
+
+## 4.2.19
+
+CronJob API version upgraded to batch/v1
+
+## 4.2.18
+
+Added option to set secretEnvVars.
+
+## 4.2.17
+
+Update Jenkins image and appVersion to jenkins lts release version 2.375.1
+
+
+## 4.2.16
+
+Fixed chart notes not rendering Jenkins URL with prefix when `controller.jenkinsUriPrefix` is set.
+Fixed chart notes not rendering Jenkins URL with `https` when `controller.ingress.tls` or `controller.controller.httpsKeyStore.enable` is set.
+Fixed chart notes rendering wrong JCasC URL when not using `controller.ingress`.
+
+## 4.2.15
+
+Update Jenkins image and appVersion to jenkins lts release version 2.361.4
+
+## 4.2.14
+
+Added option to mount all keys from an existing k8s secret
+
+## 4.2.13
+
+Adding `tpl` to `controller.additionalExistingSecrets`
+
+## 4.2.12
+
+Update Jenkins image and appVersion to jenkins lts release version 2.361.3
+
+
+## 4.2.11
+
+Update default plugin versions
+
+| plugin | old version | new version |
+|-----------------------|-----------------------|------------------------|
+| kubernetes | 3706.vdfb_d599579f3 | 3734.v562b_b_a_627ea_c |
+| git | 4.11.5 | 4.13.0 |
+| configuration-as-code | 1512.vb_79d418d5fc8 | 1569.vb_72405b_80249 |
+
+## 4.2.10
+Fix grammar and typos
+
+## 4.2.9
+Update Jenkins image and appVersion to jenkins lts release version 2.361.2
+
+## 4.2.8
+Modify the condition to trigger copying jenkins_config files when configAutoReload option is disabled during Jenkins initialization
+
+## 4.2.7
+Support for remote URL for configuration
+
+## 4.2.6
+Add option to set hostnetwork for agents
+
+## 4.2.5
+Add an extra optional argument to extraPorts in order to specify targetPort
+
+## 4.2.4
+Remove k8s capability requirements when setting priority class for controller
+
+## 4.2.3 Update plugin versions
+
+| plugin | old version | new version |
+| --------------------- | --------------------- | --------------------- |
+| kubernetes | 3600.v144b_cd192ca_a_ | 3706.vdfb_d599579f3 |
+| workflow-aggregator | 581.v0c46fa_697ffd | 590.v6a_d052e5a_a_b_5 |
+| configuration-as-code | 1429.v09b_044a_c93de | 1512.vb_79d418d5fc8 |
+| git | 4.11.3 | 4.11.5 |
+
+Resolve version conflict between default install of plugins.
+
+## 4.2.2
+
+Support Google Managed Prometheus
+
+## 4.2.1
+
+Remove option to provide command and args of agent as YAML. This feature was never supported by the Jenkins Kubernetes
+plugin.
+
+## 4.2.0
+
+Add option to provide additional containers to agents
+
+## 4.1.18
+
+Update Jenkins image and appVersion to jenkins lts release version 2.361.1
+
+
+## 4.1.17
+
+Update Jenkins casc default settings to allow `security` configs to be provided
+
+
+## 4.1.16
+
+Update Jenkins image and appVersion to jenkins lts release version 2.346.3
+
+
+## 4.1.15
+
+`projectNamingStrategy` is configurable in default config.
+
+## 4.1.14
+
+If `installPlugins` is disabled, don't create unused plugins volume.
+
+## 4.1.13
+
+Update Jenkins image and appVersion to jenkins lts release version 2.346.2
+
+
+## 4.1.12
+
+If keystore is defined, it is now also made available in the initContainer.
+
+## 4.1.11
+
+JCasC ConfigMaps now generate their name from the `jenkins.casc.configName` helper
+
+## 4.1.10
+
+Update Jenkins image and appVersion to jenkins lts release version 2.346.1
+
+
+## 4.1.9
+
+Allow setting `imagePullSecret` for backup job via `backup.imagePullSecretName`
+
+## 4.1.8
+
+Fix path of projected secrets from `additionalExistingSecrets`.
+
+## 4.1.7
+
+Update README with explanation on the required environmental variable `AWS_REGION` in case of using an S3 bucket.
+
+## 4.1.6
+
+project adminSecret, additionalSecrets and additionalExistingSecrets instead of mount with subPath
+
+## 4.1.5
+
+Update README to fix `JAVA_OPTS` name.
+
+## 4.1.4
+Update plugins
+
+## 4.1.3
+Update jenkins-controller-statefulset projected volumes definition
+
+## 4.1.1
+Added 'controller.prometheus.metricRelabelings' to allow relabling and dropping unused prometheus metrics
+
+## 4.1.0
+
+Added `controller.sidecars.configAutoReload.envFrom`, `controller.initContainerEnvFrom`, `controller.containerEnvFrom`
+
+## 4.0.1
+
+No code changes - CI updated to run unit tests using Helm 3.8.2.
+
+## 4.0.0
+
+Removes automatic `remotingSecurity` setting when using a container tag older than `2.326` (introduced in [`3.11.7`](#3117)). If you're using a version older than `2.326`, you should explicitly set `.controller.legacyRemotingSecurityEnabled` to `true`.
+
+## 3.12.2
+
+Update Jenkins image and appVersion to jenkins lts release version 2.332.3
+
+## 3.12.1
+
+Make namespace configurable for agents and additional agents.
+
+## 3.12.0
+
+Added a flag for disabling the default Jenkins Agent configuration.
+
+## 3.11.10
+
+Update Jenkins image and appVersion to jenkins lts release version 2.332.2
+
+## 3.11.9 Bump configuration-as-code plugin version
+
+| plugin | old version | new version |
+| --------------------- | ----------- | ----------- |
+| configuration-as-code | 1.51 | 1414.v878271fc496f |
+
+## 3.11.8
+
+Make [externalTrafficPolicy](https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies) and `loadBalancerSourceRanges` fields customizable for Agent listener service via `controller.agentListenerExternalTrafficPolicy` and `controller.loadBalancerSourceRanges`.
+
+## 3.11.7
+
+Removed Configuration as Code `remotingSecurity` section for Jenkins 2.326 or newer. See [Documentation](https://www.jenkins.io/redirect/AdminWhitelistRule) to learn more.
+
+## 3.11.6
+
+Update Jenkins image and appVersion to jenkins lts release version 2.332.1
+
+
+## 3.11.5
+
+Change Backup Role name function call to match the RoleDef function call in the Backup RoleBinding
+
+## 3.11.4
+
+Update Jenkins image and appVersion to jenkins lts release version 2.319.3
+
+
+## 3.11.3
+
+Update kiwigrid/k8s-sidecar:1.15.0
+Update jenkins/inbound-agent:4.11.2-4
+
+## 3.11.2
+
+Improve example for workspaceVolume. Clarify that this is not a list.
+
+## 3.11.1
+
+Update configuration-as-code plugin to 1.55.1
+
+
+## 3.11.0
+
+Update default plugin versions
+
+| plugin | old version | new version |
+| --------------------- | ----------- | ----------- |
+| kubernetes | 1.31.1 | 1.31.3 |
+| git | 4.10.1 | 4.10.2 |
+
+## 3.10.3
+
+Update Jenkins image and appVersion to jenkins lts release version 2.319.2
+
+
+## 3.10.2
+
+Fix definition of startupProbe when deploying on a Kubernetes cluster < 1.16
+
+## 3.10.1
+
+correct VALUES_SUMMARY.md for installLatestPlugins
+
+## 3.10.0
+
+Update default plugin versions
+
+| plugin | old version | new version |
+| --------------------- | ----------- | ----------- |
+| kubernetes | 1.30.11 | 1.31.1 |
+| git | 4.10.0 | 4.10.1 |
+| configuration-as-code | 1.54 | 1.55 |
+
+## 3.9.4
+
+Add JAVA_OPTIONS to the README so proxy settings get picked by jenkins-plugin-cli
+
+## 3.9.3
+
+Fix config reload request URL when httpsKeystore in use
+
+## 3.9.2
+
+Update Jenkins image and appVersion to jenkins lts release version 2.319.1
+Update following plugins:
+
+* kubernetes:1.30.11
+* git:4.10.0
+* configuration-as-code:1.54
+
+## 3.9.1
+
+Adding `tpl` to `controller.overrideArgs`
+
+## 3.9.0
+
+Added containerSecurityContext
+
+## 3.8.9
+
+Fix mounting of HTTPS keystore secret when httpsKeyStore is enabled
+
+## 3.8.8
+
+Update Jenkins image and appVersion to jenkins lts release version 2.303.3
+
+## 3.8.7
+
+Adding `tpl` to `initScripts`
+
+## 3.8.6
+
+Add `controller.tagLabel` to specify the label for the image tag, for example `jdk11` or `alpine`
+
+## 3.8.5
+
+Move jenkins web root outside of home dir
+
+## 3.8.4
+
+Add `controller.initConfigMap` to pass pre-existing `init.groovy.d` ConfigMaps to the controller
+
+## 3.8.3
+
+Update missed reference to jenkins/inbound-agent:4.11-1
+
+## 3.8.2
+
+Update jenkins/inbound-agent:4.11-1
+
+## 3.8.1
+
+Update jenkins/inbound-agent:4.10-3
+
+## 3.8.0
+
+Update kiwigrid/k8s-sidecar:1.14.2
+
+## 3.7.1
+
+Update git and casc plugins versions
+
+## 3.7.0
+
+Added the option to create AWS SecurityGroupPolicy resources
+
+## 3.6.2
+
+Fix httpsKeyStore mount when `controller.httpsKeyStore.enable` is `true`
+
+## 3.6.1
+
+Update Jenkins image and appVersion to jenkins lts release version 2.303.2
+
+
+## 3.6.0
+Support custom agent pod labels
+
+## 3.5.20
+Disallow ingress on port 50000 when agent listener is disabled
+
+## 3.5.19
+Add support for specifying termination-log behaviour for Jenkins controller
+
+## 3.5.18
+Add support for creating a Pod Disruption Budget for Jenkins controller
+
+## 3.5.17
+Update workingDir to `/home/jenkins/agent`
+
+## 3.5.16
+Update location of icon (wiki.jenkins.io is down)
+
+## 3.5.15
+Add support for adding labels to the Jenkins home Persistent Volume Claim (pvc)
+
+## 3.5.14
+
+* Updated versions of default plugins
+* Use verbose logging during plugin installation
+* download the latest version of all plugin dependencies (Fixes #442)
+
+## 3.5.13
+
+Update Jenkins image and appVersion to jenkins lts release version 2.303.1
+
+## 3.5.12
+
+Added extended documentation for Backup and Restore.
+
+## 3.5.11
+
+Sanitized the Jenkins Label
+
+## 3.5.10
+
+Fixed `controller.customJenkinsLabels` not getting templated into the controller `labelString:` field in JCasC
+
+## 3.5.9
+
+Update Jenkins image and appVersion to jenkins lts release version 2.289.3
+
+
+## 3.5.8
+
+Add parameter `backup.serviceAccount.create` to disable service account creation for backup service and `backup.serviceAccount.name` to allow change of the SA name.
+`backup.annotations` was moved to `backup.serviceAccount.annotations`
+
+## 3.5.7
+
+Enable setting `controller.serviceExternalTrafficPolicy` to set [the standard Service option](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip). `externalTrafficPolicy` denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints.
+
+## 3.5.6
+
+Add optional `controller.initContainerResources`; if set, it changes the resource allocation for the init container, otherwise `controller.resources` will be used
+
+## 3.5.5
+
+Allow to configure nodeUsageMode via `agent.nodeUsageMode`
+
+## 3.5.4
+
+Update tests to work with unittest 0.2.6
+
+## 3.5.3
+
+Update Jenkins image and appVersion to jenkins lts release version 2.289.2
+
+## 3.5.2
+
+Enable setting `controller.installLatestSpecifiedPlugins` to set whether to download the latest dependencies of any plugin that is requested to have the latest version.
+
+## 3.5.1
+Fix activeDeadlineSeconds wrong type bug in jenkins-backup-cronjob template
+
+## 3.5.0
+
+Allow `controller.podAnnotations` to be rendered as a template
+
+## 3.4.1
+
+Allow showRawYaml for the default agent's pod template to be customized.
+
+## 3.4.0
+
+configAutoReload container updated from `kiwigrid/k8s-sidecar:0.1.275` to `kiwigrid/k8s-sidecar:1.12.2`
+
+## 3.3.23
+
+Make `controller.ingress.resourceRootUrl` compatible with API version networking.k8s.io/v1 on k8s >= 1.19.x
+
+## 3.3.22
+
+Update Jenkins image and appVersion to jenkins lts release version 2.289.1
+
+## 3.3.21
+`persistence.mounts` additionally mount to init container to allow custom CA certificate keystore
+
+## 3.3.18
+Added `controller.overrideArgs` so any cli argument can be passed to the WAR.
+
+## 3.3.17
+Correct docs on disabling plugin installation
+
+## 3.3.16
+Support generating `SecretClaim` resources in order to read secrets from HashiCorp Vault into Kubernetes using `kube-vault-controller`.
+
+## 3.3.15
+Prevent `controller.httpsKeyStore` from improperly being quoted, leading to an invalid location on disk
+
+## 3.3.14
+Correct docs on disabling plugin installation
+
+## 3.3.13
+Update plugins
+
+## 3.3.12
+Add `controller.additionalExistingSecrets` property
+
+## 3.3.11
+Add support for disabling the Agent listener service via `controller.agentListenerEnabled`.
+
+## 3.3.10
+Update Jenkins image and appVersion to jenkins lts release version 2.277.4
+
+## 3.3.9
+* Change helper template so user defined `agent.jenkinsUrl` value will always be used, if set
+* Simplify logic for `jenkinsUrl` and `jenkinsTunnel` generation: always use fully qualified address
+
+## 3.3.8
+Update Jenkins image and appVersion to jenkins lts release version 2.277.3
+
+## 3.3.7
+fix controller-ingress line feed bug
+
+## 3.3.6
+
+Update Git plugin version to v4.7.1
+Update ldap plugin version to v2.5
+
+## 3.3.5
+
+Use tpl function for environment vars. Fixes [https://github.com/jenkinsci/helm-charts/issues/324]
+
+## 3.3.4
+
+Update Jenkins image and appVersion to jenkins lts release version 2.277.2
+
+
+## 3.3.3
+
+Enable setting `controller.installLatestPlugins` to set whether to download the minimum required version of all dependencies.
+
+## 3.3.2
+
+Add `controller.additionalSecrets` documentation
+
+## 3.3.1
+
+Add `controller.additionalSecrets` property
+
+## 3.3.0
+
+Change default Jenkins image to `jdk11` variant
+
+## 3.2.6
+
+Add missing `controller.jenkinsUrlProtocol` property
+
+## 3.2.5
+
+Add additional metadata `artifacthub.io/images` for artifacthub
+
+## 3.2.4
+Update Jenkins image and appVersion to jenkins lts release version 2.277.1
+Update Git plugin version to v4.6.0
+Update kubernetes plugin version to v1.29.2
+
+## 3.2.3
+
+Fix rendering `controller.ingress.path`
+
+## 3.2.2
+
+Added description for `controller.jenkinsUrl` value
+
+## 3.2.1
+
+Enable setting ImagePullSecrets to controller and agent service accounts.
+
+## 3.2.0
+
+Calculate consistent unique agent IDs to be used in pod templates. Fixes [https://github.com/jenkinsci/helm-charts/issues/270]
+
+## 3.1.15
+
+Fix documentation for the kubernetes probes
+
+## 3.1.14
+
+Typo in documentation
+
+## 3.1.13
+
+Update Jenkins image and appVersion to jenkins lts release version 2.263.4
+
+## 3.1.12
+
+Added GitHub action to automate the updating of LTS releases.
+
+## 3.1.11
+
+Enable setting controller.updateStrategy to change the update strategy for StatefulSet
+
+## 3.1.10
+
+Fixed issue for the AgentListener where it was not possible to attribute a NodePort
+
+## 3.1.9
+
+Upgrade kubernetes plugin to 1.29.0 and CasC plugin to 1.47
+
+## 3.1.8
+
+Fix init scripts config map name
+
+## 3.1.7
+
+Fix missing newline when `httpsKeyStore` is enabled
+
+## 3.1.6
+
+Mount controller init scripts from ConfigMap
+
+## 3.1.5
+
+Fix `namespaceOverride` not applied when loading JCasC
+
+## 3.1.4
+
+Update Git plugin version to v4.5.2
+
+## 3.1.3
+
+Update Jenkins image and appVersion to jenkins lts release version 2.263.3
+
+## 3.1.2
+
+Enable setting maxRequestsPerHostStr to change the max concurrent connections to Kubernetes API
+
+## 3.1.1
+
+Update Jenkins image and appVersion to jenkins lts release version 2.263.2
+
+## 3.1.0
+
+* Added `.Values.controller.podSecurityContextOverride` and `.Values.backup.podSecurityContextOverride`.
+* Added simple default values tests for `jenkins-backup-cronjob.yaml`.
+
+## 3.0.14
+
+Enable to only backup job folder instead of whole jenkins
+
+## 3.0.13
+
+Improve Documentation around JCasc and Custom Image
+
+## 3.0.12
+
+Added GitHub Action testing on Kind 1.16, 1.17, 1.18, 1.19 & 1.20
+
+## 3.0.11
+
+Fixes & unit tests for Ingress resources on Kubernetes 1.19 and above
+
+## 3.0.10
+
+Ingress resources on Kubernetes 1.19 (or above) are created with the version `networking.k8s.io/v1`
+
+## 3.0.9
+
+Added support for backing up to Azure Blob Storage.
+
+## 3.0.8
+
+* Typo in documentation
+
+## 3.0.7
+
+* Add support for setting default agent workspaceVolume
+
+## 3.0.6
+
+Use 2.263.1 image
+
+## 3.0.5
+
+* Update appVersion to reflect new jenkins lts release version 2.263.1
+
+## 3.0.4
+
+* Fix documentation for additional secret mounts
+
+## 3.0.3
+
+* Update `README.md` with explanation on how to mount additional secrets
+
+## 3.0.2
+
+* Fix `.Values.controller.tolerations` and `.Values.controller.nodeSelector` variable names in templates\jenkins-backup-cronjob.yaml
+
+## 3.0.1
+
+* added 'runAsNonroot' to security context
+
+## 3.0.0
+
+* Chart uses StatefulSet instead of Deployment
+* XML configuration was removed in favor of JCasC
+* chart migrated to helm 3.0.0 (apiVersion v2)
+* offending terms have been removed
+* values have been renamed and re-ordered to make it easier to use
+* already deprecated items have been removed
+* componentName for the controller is now `jenkins-controller`
+* componentName for the agent is now `jenkins-agent`
+* container names are now
+ * `init` for the init container which downloads Jenkins plugins
+ * `jenkins` for the Jenkins controller
+ * `config-reload` for the sidecar container which automatically reloads JCasC
+* Updated UI tests to use official `bats/bats` image instead of `dduportal/bats`
+
+For migration instructions from previous versions and additional information check README.md.
+
+## 2.19.0
+
+* Use lts version 2.249.3
+* Update kubernetes, workflow-aggregator, git and configuration-as-code plugins.
+* Fail apply_config.sh script if an error occurs.
+
+## 2.18.2
+
+Fix: `master.javaOpts` issue with quoted values
+
+## 2.18.1
+
+Recommend installing plugins in custom image
+
+## 2.18.0
+
+Removed /tmp volume. Making /tmp a volume causes permission issues with jmap/jstack on certain Kubernetes clusters
+
+## 2.17.1
+
+Fix location of jenkins.war file.
+It is located in `/usr/share/jenkins/jenkins.war` and can be configured via `master.jenkinsWar`.
+
+## 2.17.0
+
+Add support for plugin-installation-manager-tool
+
+## 2.16.0
+
+Added Startup probe for Jenkins pod when Kubernetes cluster is 1.16 or newer
+
+## 2.15.5
+
+scriptApproval is taken into account when enableXmlConfig is false.
+
+## 2.15.4
+
+Add Tilt support for easier helm chart development.
+
+## 2.15.3
+
+Fix error on missing `ingress.paths` value
+
+## 2.15.2
+
+Added documentation for ingress and jenkins URL
+
+## 2.15.1
+
+Fix priorityClassName entry in values.yaml file
+
+## 2.15.0
+
+Added support for disabling the helm.sh/chart annotation
+
+## 2.14.0
+
+Added support for annotations in podTemplates
+
+## 2.13.2
+
+Add nodeSelector in the backup pod
+Fix tolerations in the backup pod
+
+## 2.13.1
+
+Update list of maintainers
+
+## 2.13.0
+
+Added Support for websockets in the default Jcasc config
+Added trailing slash to JENKINS_URL env var
+
+## 2.12.2
+
+Added unit tests for most resources in the Helm chart.
+
+## 2.12.1
+
+Helm chart README update
+
+## 2.12.0
+
+Add option to configure securityContext capabilities
+
+## 2.11.0
+
+Added configurable security context for jenkins backup CronJob and annotations to its serviceaccount.
+
+## 2.10.0
+
+Make activeDeadlineSeconds for backup job configurable
+
+## 2.9.0
+
+Make namespace of PrometheusRule configurable
+
+## 2.8.2
+
+Bumped configuration-as-code plugin version from 1.41 to 1.43.
+See [configuration-as-code plugin issue #1478](https://github.com/jenkinsci/configuration-as-code-plugin/issues/1478)
+
+## 2.8.1
+
+Fix indentation of JAVA_OPTS
+
+## 2.8.0
+
+Add support for helm unittest and include first tests
+
+## 2.7.2
+
+Target port of container `jenkins-sc-config` takes the value from values.yaml.
+
+## 2.7.0
+
+Add a secondary ingress template for those who want a second ingress with different labels or annotations or whatever else.
+
+Example: You want /github-webhook to be on a public ingress, while the main Jenkins instance is on a private locked-down ingress.
+
+## 2.6.5
+
+Update configScripts example
+
+## 2.6.4
+
+Add timja as a maintainer
+
+## 2.6.3
+
+Update k8s-sidecar image to 0.1.193
+
+## 2.6.2
+
+Only mount empty dir secrets-dir if either `master.enableXmlConfig` or `master.secretsFilesSecret` is set
+Fixes #19
+
+## 2.6.1 Do not render empty JCasC templates
+
+## 2.6.0 First release in jenkinsci GitHub org
+
+Updated README for new location
+
+## 2.5.2
+
+Fix as per JENKINS-47112
+
+## 2.5.1
+
+Support Jenkins Resource Root URL
+
+## 2.5.0
+
+Add an option to specify that Jenkins master should be initialized only once, during first install.
+
+## 2.4.1
+
+Reorder README parameters into sections to facilitate chart usage and maintenance
+
+## 2.4.0 Update default agent image
+
+`jenkins/jnlp-slave` is deprecated and `jenkins/inbound-agent` should be used instead.
+Also updated it to newest version (4.3-4).
+
+## 2.3.3 correct templating of master.slaveJenkinsUrl
+
+Fixes #22708
+
+## 2.3.2 Fix wrong value for overwritePluginsFromImage
+
+Fixes #23003
+Fixes #22633
+
+Also fixes indentation for #23114
+
+## 2.3.1
+
+Always mount {{ .Values.master.jenkinsRef }}/secrets/ directory. Previously it
+was mounted only when `master.enableXmlConfig` was enabled.
+
+## 2.3.0
+
+Add an option to specify pod based on labels that can connect to master if NetworkPolicy is enabled
+
+## 2.2.0 increase retry for config auto reload
+
+Configure `REQ_RETRY_CONNECT` to `10` to give Jenkins more time to start up.
+<https://github.com/kiwigrid/k8s-sidecar>
+
+Value can be configured via `master.sidecars.configAutoReload.reqRetryConnect`
+
+## 2.1.2 updated README
+
+## 2.1.1 update credentials-binding plugin to 1.23
+
+## 2.1.0
+
+Add support to set `runAsUser` and `runAsGroup` for `agent`.
+
+## 2.0.1
+
+Only render authorizationStrategy and securityRealm when values are set.
+
+## 2.0.0 Configuration as Code now default + container does not run as root anymore
+
+The README contains more details for this update.
+Please note that the updated values contain breaking changes.
+
+## 1.27.0 Update plugin versions & sidecar container
+
+| plugin | old version | new version |
+| --------------------- | ----------- | ----------- |
+| kubernetes | 1.25.3 | 1.25.7 |
+| workflow-job | 2.38 | 2.39 |
+| credentials-binding | 1.21 | 1.22 |
+| configuration-as-code | 1.39 | 1.41 |
+
+configAutoReload container updated from `kiwigrid/k8s-sidecar:0.1.132` to `kiwigrid/k8s-sidecar:0.1.144`
+
+## 1.26.0
+
+Add support to override `workingDir` for default pod template
+
+## 1.25.0
+
+Add support for installing plugins in addition to the chart's default plugins via `master.additionalPlugins`
+
+## 1.24.0
+
+Allow configuration of yamlMergeStrategy via `agent.yamlMergeStrategy`
+
+## 1.23.2
+
+In the `jenkins.xml.podTemplate` helper function, allow templating of all string values under `agent.volumes` except `type` by rendering them with the `tpl` function
+
+## 1.23.1
+
+Added auto detection for Ingress API version
+
+## 1.23.0
+
+Allow to use an existing secret for the jenkins admin credentials
+
+## 1.22.0
+
+Add support for UI security in the default JCasC via `master.JCasC.securityRealm` and `master.JCasC.authorizationStrategy` which deny anonymous access by default
+
+## 1.21.3
+
+Render `agent.envVars` in kubernetes pod template JCasC
+
+## 1.21.2
+
+Cleanup `agent.yamlTemplate` rendering in kubernetes pod template XML configuration
+
+## 1.21.1
+
+Render `agent.nodeSelector` in the kubernetes pod template JCasC
+
+## 1.21.0
+
+Add support for overriding Ingress paths via `master.ingress.paths`
+
+## 1.20.0
+
+Add the following options for configuring the Kubernetes plugin.
+
+- master.slaveDefaultsProviderTemplate
+- master.slaveJenkinsUrl
+- master.slaveJenkinsTunnel
+- master.slaveConnectTimeout
+- master.slaveReadTimeout
+
+## 1.19.0
+
+Add support for disabling remember me via `master.disableRememberMe`
+Add support for using a different markup formatter via `master.markupFormatter`
+
+## 1.18.1
+
+Add support for executor mode configuration with `master.executorMode`.
+
+## 1.18.0 Make installation of configuration-as-code plugin explicit
+
+Instead of configuring the configuration-as-code plugin version via
+`master.JCasC.pluginVersion` it is now installed via `master.installPlugins`
+
+## 1.17.2
+
+Allow templating of `serviceAccount.annotations` and `serviceAccountAgent.annotations` by rendering them with the `tpl` function
+
+## 1.17.1
+
+Add support for Persistent Volume Claim (PVC) in `agent.volumes`
+
+## 1.17.0
+
+Render `agent.volumes` in kubernetes pod template JCasC
+
+## 1.16.2
+
+Reverts 1.16.1 as it introduced an error #22047
+
+## 1.16.1
+
+Fixed a bug with the master.runAsUser variable due to using the wrong type for comparison.
+
+## 1.16.0
+
+Add `master.overwritePluginsFromImage` to allow support for jenkins plugins installed in the master image to persist.
+
+## 1.15.0 Update plugin versions & sidecar container
+
+| plugin | old version | new version |
+| --------------------- | ----------- | ----------- |
+| kubernetes | 1.25.1 | 1.25.3 |
+| workflow-job | 2.36 | 2.38 |
+| git | 4.2.0 | 4.2.2 |
+| configuration-as-code | 1.36 | 1.39 |
+
+configAutoReload container updated from `kiwigrid/k8s-sidecar:0.1.20` to `kiwigrid/k8s-sidecar:0.1.132`
+
+## 1.14.0
+
+support auto-reload container environment variables configuration
+
+## 1.13.3
+
+Fix wrong indent in tolerations
+
+## 1.13.2
+
+Add support for custom ClusterIP
+
+## 1.13.1
+
+Fix `agent.yamlTemplate` rendering in kubernetes pod template JCasC
+
+## 1.13.0
+
+Add `master.networkPolicy.internalAgents` and `master.networkPolicy.externalAgents` stanzas to fine grained controls over where internal/external agents can connect from. Internal ones are allowed based on pod labels and (optionally) namespaces, and external ones are allowed based on IP ranges.
+
+## 1.12.0 Support additional agents
+
+Add support for easy configuration of additional agents which inherit values from `agent`.
+
+## 1.11.3
+
+Update the kubernetes plugin from 1.24.1 to 1.25.1 and grant 'watch' permission to 'events' which is required since this plugin version.
+
+## 1.11.2 Configure agent.args in values.yaml
+
+## 1.11.1 Support for master.additionalConfig
+
+Fixed a bug with jenkinsHome variable in range block when master.additionalConfig is set - Helm cannot evaluate field Values in type interface {}.
+
+## 1.11.0 Add support for configuring custom pod templates
+
+Add `agent.podTemplates` option for declaring custom pod templates in the default configured kubernetes cloud.
+
+## 1.10.1 Only copy JCasC files if there are any
+
+The chart always tried to copy Configuration as Code configs even if there are none. That resulted in an error which is resolved with this.
+
+## 1.10.0 Remove configuration-as-code-support plugins
+
+In recent version of configuration-as-code-plugin this is no longer necessary.
+
+## 1.9.24
+
+Update JCasC auto-reload docs and remove stale ssh key references from version "1.8.0 JCasC auto reload works without ssh keys"
+
+## 1.9.23 Support jenkinsUriPrefix when JCasC is enabled
+
+Fixed a bug in the configuration as code reload URL, where it wouldn't work with a jenkinsUriPrefix set.
+
+## 1.9.22
+
+Add `master.jenkinsHome` and `master.jenkinsRef` options to use docker images derivates from Jenkins
+
+## 1.9.21
+
+Add `master.terminationGracePeriodSeconds` option
+
+## 1.9.20
+
+Update default plugins
+
+- kubernetes:1.24.1
+- workflow-job:2.36
+- workflow-aggregator:2.6
+- credentials-binding:1.21
+- git:4.2.0
+- configuration-as-code:1.36
+
+## 1.9.19
+
+Update docs for Helm 3
+
+## 1.9.18
+
+Make `jenkins-home` attachable to Azure Disks without pvc
+
+```yaml
+ volumes:
+ - name: jenkins-home
+ azureDisk:
+ kind: Managed
+ diskName: myAKSDisk
+ diskURI: /subscriptions/<subscriptionID>/resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk
+```
+
+## 1.9.16
+
+Fix PodLabel for NetworkPolicy to work if enabled
+
+## 1.9.14
+
+Properly fix case sense in `Values.master.overwriteConfig` in `config.yaml`
+
+## 1.9.13
+
+Fix case sense in `Values.master.overwriteConfig` in `config.yaml`
+
+## 1.9.12
+
+Scriptapprovals are overwritten when overwriteConfig is enabled
+
+## 1.9.10
+
+Added documentation for `persistence.storageClass`.
+
+## 1.9.9
+Make `master.deploymentAnnotation` configurable.
+
+## 1.9.8
+
+Make `agent.slaveConnectTimeout` configurable: by increasing this value Jenkins will not cancel&ask k8s for a pod again, while it's on `ContainerCreating`. Useful when you have big images or autoscaling takes some time.
+
+## 1.9.7 Update plugin versions
+
+| plugin | old version | new version |
+|-----------------------|-------------|-------------|
+| kubernetes | 1.18.2 | 1.21.2 |
+| workflow-job | 2.33 | 2.36 |
+| credentials-binding | 1.19 | 1.20 |
+| git | 3.11.0 | 4.0.0 |
+| configuration-as-code | 1.27 | 1.32 |
+
+## 1.9.6
+
+Enables jenkins to use keystore in order to have native ssl support #17790 <https://wiki.jenkins.io/pages/viewpage.action?pageId=135468777>
+
+## 1.9.5 Enable remoting security
+
+`Manage Jenkins` -> `Configure Global Security` -> `Enable Agent → Master Access Control` is now enabled via configuration as code plugin
+
+## 1.9.4 Option to set existing secret with Google Application Default Credentials
+
+Google application credentials are kept in a file, which has to be mounted to a pod. You can set `gcpcredentials` in `existingSecret` as follows:
+
+```yaml
+ existingSecret:
+ jenkins-service-account:
+ gcpcredentials: application_default_credentials.json
+```
+
+Helm template then creates the necessary volume mounts and `GOOGLE_APPLICATION_CREDENTIALS` environmental variable.
+
+## 1.9.3 Fix `JAVA_OPTS` when config auto-reload is enabled
+
+## 1.9.2 Add support for kubernetes-credentials-provider-plugin
+
+[kubernetes-credentials-provider-plugin](https://jenkinsci.github.io/kubernetes-credentials-provider-plugin/) needs permissions to get/watch/list kubernetes secrets in the namespaces where Jenkins is running.
+
+The necessary role binding can be created using `rbac.readSecrets` when `rbac.create` is `true`.
+
+To quote from the plugin documentation:
+
+> Because granting these permissions for secrets is not something that should be done lightly it is highly advised for security reasons that you both create a unique service account to run Jenkins as, and run Jenkins in a unique namespace.
+
+Therefore this is disabled by default.
+
+## 1.9.1 Update kubernetes plugin URL
+
+## 1.9.0 Change default serviceType to ClusterIP
+
+## 1.8.2
+
+Revert fix in `1.7.10` since direct connection is now disabled by default.
+
+## 1.8.1
+
+Add `master.schedulerName` to allow setting a Kubernetes custom scheduler
+
+## 1.8.0 JCasC auto reload works without ssh keys
+
+We make use of the fact that the Jenkins Configuration as Code Plugin can be triggered via http `POST` to `JENKINS_URL/configuration-as-code/reload` and a pre-shared key.
+The sidecar container responsible for reloading config changes is now `kiwigrid/k8s-sidecar:0.1.20` instead of its fork `shadwell/k8s-sidecar`.
+
+References:
+
+- [Triggering Configuration Reload](https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/docs/features/configurationReload.md)
+- [kiwigrid/k8s-sidecar](https://hub.docker.com/r/kiwigrid/k8s-sidecar)
+
+`master.sidecars.configAutoReload.enabled` now works using `casc.reload.token`
+
+## 1.7.10
+
+Disable direct connection in default configuration (when kubernetes plugin version >= 1.20.2).
+Note: In case direct connection is going to be used `jenkins/jnlp-slave` needs to be version `3.35-5` or newer.
+
+## 1.7.9
+
+Prevented Jenkins Setup Wizard on new installations
+
+## 1.7.8
+
+Extend extraPorts to be opened on the Service object, not just the container.
+
+## 1.7.7
+
+Add persistentvolumeclaim permission to the role to support new dynamic pvc workspaces.
+
+## 1.7.6
+
+Updated `master.slaveKubernetesNamespace` to parse helm templates.
+Defined a sensible empty value for the following variables, to silence invalid warnings:
+
+- master.extraPorts
+- master.scriptApproval
+- master.initScripts
+- master.JCasC.configScripts
+- master.sidecars.other
+- agent.envVars
+- agent.volumes
+
+## 1.7.5
+
+Fixed an issue where the JCasC won't run if JCasC auto-reload is enabled [issue #17135](https://github.com/helm/charts/issues/17135)
+
+## 1.7.4
+
+Comments out JCasC example of jenkins.systemMessage so that it can be used by end users. Previously, an attempt to set systemMessage causes Jenkins to startup, citing duplicate JCasC settings for systemMessage [issue #13333](https://github.com/helm/charts/issues/13333)
+
+## 1.7.2
+
+Update kubernetes-plugin to version 1.18.2 which fixes frequently encountered [JENKINS-59000](https://issues.jenkins-ci.org/plugins/servlet/mobile#issue/JENKINS-59000)
+
+## 1.7.1
+
+Update the default requirements for jenkins-agent to 512Mi which fixes frequently encountered [issue #3723](https://github.com/helm/charts/issues/3723)
+
+## 1.7.0
+
+[Jenkins Configuration as Code Plugin](https://github.com/jenkinsci/configuration-as-code-plugin) default configuration can now be enabled via `master.JCasC.defaultConfig`.
+
+JCasC default configuration includes:
+
+- Jenkins URL
+- Admin email `master.jenkinsAdminEmail`
+- crumbIssuer
+- disableRememberMe: false
+- mode: NORMAL
+- numExecutors: {{ .Values.master.numExecutors }}
+- projectNamingStrategy: "standard"
+- kubernetes plugin
+ - containerCapStr via `agent.containerCap`
+ - jenkinsTunnel
+ - jenkinsUrl
+ - maxRequestsPerHostStr: "32"
+ - name: "kubernetes"
+ - namespace
+ - serverUrl: `"https://kubernetes.default"`
+ - template
+ - containers
+ - alwaysPullImage: `agent.alwaysPullImage`
+ - args
+ - command
+ - envVars
+ - image: `agent.image:agent.imageTag`
+ - name: `.agent.sideContainerName`
+ - privileged: `.agent.privileged`
+ - resourceLimitCpu: `agent.resources.limits.cpu`
+ - resourceLimitMemory: `agent.resources.limits.memory`
+ - resourceRequestCpu: `agent.resources.requests.cpu`
+ - resourceRequestMemory: `agent.resources.requests.memory`
+ - ttyEnabled: `agent.TTYEnabled`
+ - workingDir: "/home/jenkins"
+ - idleMinutes: `agent.idleMinutes`
+ - instanceCap: 2147483647
+ - imagePullSecrets:
+ - name: `.agent.imagePullSecretName`
+ - label
+ - name
+ - nodeUsageMode: "NORMAL"
+ - podRetention: `agent.podRetention`
+ - serviceAccount
+ - showRawYaml: true
+ - slaveConnectTimeoutStr: "100"
+ - yaml: `agent.yamlTemplate`
+ - yamlMergeStrategy: "override"
+- security:
+ - apiToken:
+ - creationOfLegacyTokenEnabled: false
+ - tokenGenerationOnCreationEnabled: false
+ - usageStatisticsEnabled: true
+
+Example `values.yaml` which enables JCasC, it's default config and configAutoReload:
+
+```yaml
+master:
+ JCasC:
+ enabled: true
+ defaultConfig: true
+ sidecars:
+ configAutoReload:
+ enabled: true
+```
+
+add master.JCasC.defaultConfig and configure location
+
+- JCasC configuration is stored in template `jenkins.casc.defaults`
+ so that it can be used in `config.yaml` and `jcasc-config.yaml`
+ depending on if configAutoReload is enabled or not
+
+- Jenkins Location (URL) is configured to provide a starting point
+  for the config
+
+## 1.6.1
+
+Print error message when `master.sidecars.configAutoReload.enabled` is `true`, but the admin user can't be found to configure the SSH key.
+
+## 1.6.0
+
+Add support for Google Cloud Storage for backup CronJob (migrating from nuvo/kube-tasks to maorfr/kube-tasks)
+
+## 1.5.9
+
+Fixed a warning when sidecar resources are provided through a parent chart or override values
+
+## 1.5.8
+
+Fixed an issue when master.enableXmlConfig is set to false: Always mount jenkins-secrets volume if secretsFilesSecret is set (#16512)
+
+## 1.5.7
+
+added initial changelog (#16324)
+commit: cee2ebf98
+
+## 1.5.6
+
+enable xml config misspelling (#16477)
+commit: a125b99f9
+
+## 1.5.5
+
+Jenkins master label (#16469)
+commit: 4802d14c9
+
+## 1.5.4
+
+add option enableXmlConfig (#16346)
+commit: 387d97a4c
+
+## 1.5.3
+
+extracted "jenkins.URL" into template (#16347)
+commit: f2fdf5332
+
+## 1.5.2
+
+Fix backups when deployment has custom name (#16279)
+commit: 16b89bfff
+
+## 1.5.1
+
+Ability to set custom namespace for ServiceMonitor (#16145)
+commit: 18ee6cf01
+
+## 1.5.0
+
+update Jenkins plugins to fix security issue (#16069)
+commit: 603cf2d2b
+
+## 1.4.3
+
+Use fixed container name (#16068)
+commit: b3e4b4a49
+
+## 1.4.2
+
+Provide default job value (#15963)
+commit: c462e2017
+
+## 1.4.1
+
+Add Jenkins backendconfig values (#15471)
+commit: 7cc9b54c7
+
+## 1.4.0
+
+Change the value name for docker image tags - standartise to helm preferred value name - tag; this also allows auto-deployments using weaveworks flux (#15565)
+commit: 5c3d920e7
+
+## 1.3.6
+
+jenkins deployment port should be target port (#15503)
+commit: 83909ebe3
+
+## 1.3.5
+
+Add support for namespace specification (#15202)
+commit: e773201a6
+
+## 1.3.4
+
+Adding sub-path option for scraping (#14833)
+commit: e04021154
+
+## 1.3.3
+
+Add existingSecret to Jenkins backup AWS credentials (#13392)
+commit: d9374f57d
+
+## 1.3.2
+
+Fix JCasC version (#14992)
+commit: 26a6d2b99
+
+## 1.3.1
+
+Update affinity for a backup cronjob (#14886)
+commit: c21ed8331
+
+## 1.3.0
+
+only install casc support plugin when needed (#14862)
+commit: a56fc0540
+
+## 1.2.2
+
+DNS Zone customization (#14775)
+commit: da2910073
+
+## 1.2.1
+
+only render comment if configAutoReload is enabled (#14754)
+commit: e07ead283
+
+## 1.2.0
+
+update plugins to latest version (#14744)
+commit: 84336558e
+
+## 1.1.24
+
+add example for EmptyDir volume (#14499)
+commit: cafb60209
+
+## 1.1.23
+
+check if installPlugins is set before using it (#14168)
+commit: 1218f0359
+
+## 1.1.22
+
+Support servicemonitor and alerting rules (#14124)
+commit: e15a27f48
+
+## 1.1.21
+
+Fix: healthProbe timeouts mapping to initial delay (#13875)
+commit: 825b32ece
+
+## 1.1.20
+
+Properly handle overwrite config for additional configs (#13915)
+commit: 18ce9b558
+
+## 1.1.18
+
+update maintainer (#13897)
+commit: 223002b27
+
+## 1.1.17
+
+add apiVersion (#13795)
+commit: cd1e5c35a
+
+## 1.1.16
+
+allow changing of the target port to support TLS termination sidecar (#13576)
+commit: a34d3bbcc
+
+## 1.1.15
+
+fix wrong pod selector in jenkins-backup (#13542)
+commit: b5df4fd7e
+
+## 1.1.14
+
+allow templating of customInitContainers (#13536)
+commit: d1e1421f4
+
+## 1.1.13
+
+fix #13467 (wrong deprecation message) (#13511)
+commit: fbe28fa1c
+
+## 1.1.12
+
+Correct customInitContainers Name example. (#13405)
+commit: 6c6e40405
+
+## 1.1.11
+
+fix master.runAsUser, master.fsGroup examples (#13389)
+commit: 2d7e5bf72
+
+## 1.1.10
+
+Ability to specify raw yaml template (#13319)
+commit: 77aaa9a5f
+
+## 1.1.9
+
+correct NOTES.txt - use master.ingress.hostname (#13318)
+commit: b08ef6280
+
+## 1.1.8
+
+explain how to upgrade major versions (#13273)
+commit: e7617a97e
+
+## 1.1.7
+
+Add support for idleMinutes and serviceAccount (#13263)
+commit: 4595ee033
+
+## 1.1.6
+
+Use same JENKINS_URL no matter if slaves use different namespace (#12564)
+commit: 94c90339f
+
+## 1.1.5
+
+fix deprecation checks (#13224)
+commit: c7d2f8105
+
+## 1.1.4
+
+Fix issue introduced in #13136 (#13232)
+commit: 0dbcded2e
+
+## 1.1.3
+
+fix chart errors (#13197)
+commit: 692a1e3da
+
+## 1.1.2
+
+correct selector for jenkins pod (#13200)
+commit: 4537e7fda
+
+## 1.1.1
+
+Fix rendering of customInitContainers and lifecycle for Jenkins helm chart (#13189)
+commit: e8f6b0ada
+
+## 1.1.0
+
+Add support for openshift route in jenkins (#12973)
+commit: 48c58a430
+
+## 1.0.0
+
+helm chart best practices (#13136)
+commit: b02ae3f48
+
+### Breaking changes
+
+- values have been renamed to follow helm chart best practices for naming conventions so
+ that all variables start with a lowercase letter and words are separated with camelcase
+ <https://helm.sh/docs/chart_best_practices/#naming-conventions>
+- all resources are now using recommended standard labels
+ <https://helm.sh/docs/chart_best_practices/#standard-labels>
+
+As a result of the label changes also the selectors of the deployment have been updated.
+Those are immutable so trying an updated will cause an error like:
+
+```text
+Error: Deployment.apps "jenkins" is invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/component":"jenkins-master", "app.kubernetes.io/instance":"jenkins"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
+```
+
+In order to upgrade, delete the Jenkins Deployment before upgrading:
+
+```console
+kubectl delete deploy jenkins
+```
+
+## 0.40.0
+
+Allow to override jenkins location protocol (#12257)
+commit: 18a830626
+
+## 0.39.0
+
+Add possibility to add custom init-container and lifecycle for master-container (#13062)
+commit: 14d043593
+
+## 0.38.0
+
+Support `priorityClassName` on Master Deployment (#13069)
+commit: e896c62bc
+
+## 0.37.3
+
+Add support for service account annotations in jenkins (#12969)
+commit: b22774e2f
+
+## 0.37.2
+
+fix: add hostName to ingress in values.yaml (#12946)
+commit: 041045e9b
+
+## 0.37.1
+
+Update to match actual defaults in value.yaml (#12904)
+commit: 73b6d37eb
+
+## 0.37.0
+
+Support multiple Jenkins instances in same namespace (#12748)
+commit: 32ff2f343
+
+## 0.36.5
+
+Fix wrong comment in values.yaml (#12761)
+commit: 9db8ced23
+
+## 0.36.4
+
+Re-add value for Ingress API Version (#12753)
+commit: ecb7791b5
+
+## 0.36.3
+
+allow templating of volumes (#12734)
+commit: adbda2ca6
+
+## 0.36.2
+
+Fix self-introduced whitespace bug (#12528)
+commit: eec1678eb
+
+## 0.36.1
+
+Add flag to overwrite jobs definition from values.yaml (#12427)
+commit: fd349b2fc
+
+## 0.36.0
+
+Replace OwnSshKey with AdminSshKey (#12140) (#12466)
+commit: 80a8c9eb6
+
+## 0.35.2
+
+add note for breaking changes (#12203)
+commit: e779c5a54
+
+## 0.35.1
+
+Allow Jenkins to run with READONLYROOTFS psp (#12338)
+commit: 7c419e191
+
+## 0.35.0
+
+Jenkins OverwriteConfig setting also overwrites init scripts (#9468)
+commit: 501335b76
+
+## 0.34.1
+
+Fix typo on hostname variable (#12156)
+commit: 3d337d8dd
+
+## 0.34.0
+
+Allow ingress without host rule (#11960)
+commit: ddc966d1e
+
+## 0.33.2
+
+Improve documentation - clarify that rbac is needed for autoreload (#11739)
+commit: 9d75a5c34
+
+## 0.33.1
+
+use object for rollingUpdate (#11909)
+commit: cb9cf21e8
+
+## 0.33.0
+
+Add hostAliases (#11701)
+commit: 0b89e1094
+
+## 0.32.10
+
+Fix slave jnlp port always being reset when container is restarted (#11685)
+commit: d7d51797b
+
+## 0.32.9
+
+add ingress Hostname an ApiVersion to docs (#11576)
+commit: 4d3e77137
+
+## 0.32.8
+
+Support custom master pod labels in deployment (#9714) (#11511)
+commit: 9de96faa0
+
+## 0.32.7
+
+Fix Markdown syntax in README (#11496)
+commit: a32221a95
+
+## 0.32.6
+
+Added custom labels on jenkins ingress (#11466)
+commit: c875d2b9b
+
+## 0.32.5
+
+fix typo in default jenkins agent image fixes #11356 (#11463)
+commit: 30adb9a91
+
+## 0.32.4
+
+fix incorrect Deployment when using sidecars (#11413)
+commit: 362b4cef8
+
+## 0.32.3
+
+[]: #10131 (#11411)
+commit: 49cb72055
+
+## 0.32.2
+
+Option to expose the slave listener port as host port (#11187)
+commit: 2f85a9663
+
+## 0.32.1
+
+Updating Jenkins deployment fails appears rollingUpdate needs to be (#11166)
+commit: 07fc9dbde
+
+## 0.32.0
+
+Merge Sidecard configs (#11339)
+commit: 3696090b9
+
+## 0.31.0
+
+Add option to overwrite plugins (#11231)
+commit: 0e9aa00a5
+
+## 0.30.0
+
+Added slave Pod env vars (#8743)
+commit: 1499f6608
+
+## 0.29.3
+
+revert indentation to previous working version (#11293)
+commit: 61662f17a
+
+## 0.29.2
+
+allow running sidecar containers for Jenkins master (#10950)
+commit: 9084ce54a
+
+## 0.29.1
+
+Indent lines related to EnableRawHtmlMarkupFormatter (#11252)
+commit: 20b310c08
+
+## 0.29.0
+
+Jenkins Configuration as Code (#9057)
+commit: c3e8c0b17
+
+## 0.28.11
+
+Allow to enable OWASP Markup Formatter Plugin (#10851)
+commit: 9486e5ddf
+
+## 0.28.10
+
+Fixes #1341 -- update Jenkins chart documentation (#10290)
+commit: 411c81cd0
+
+## 0.28.9
+
+Quoted JavaOpts values (#10671)
+commit: 926a843a8
+
+## 0.28.8
+
+Support custom labels in deployment (#9714) (#10533)
+commit: 3e00b47fa
+
+## 0.28.7
+
+separate test resources (#10597)
+commit: 7b7ae2d11
+
+## 0.28.6
+
+allow customizing livenessProbe periodSeconds (#10534)
+commit: 3c94d250d
+
+## 0.28.5
+
+Add role kind option (#8498)
+commit: e791ad124
+
+## 0.28.4
+
+workaround for busybox's cp (Closes: #10471) (#10497)
+commit: 0d51a4187
+
+## 0.28.3
+
+fix parsing java options (#10140)
+commit: 9448d0293
+
+## 0.28.2
+
+Fix job definitions in standard values.yaml (#10184)
+commit: 6b6355ae7
+
+## 0.28.1
+
+add numExecutors as a variable in values file (#10236)
+commit: d5ea2050f
+
+## 0.28.0
+
+various (#10223)
+commit: e17d2a65d
+
+## 0.27.0
+
+add backup cronjob (#10095)
+commit: 863ead8db
+
+## 0.26.2
+
+add namespace flag for port-forwarding in jenkins notes (#10399)
+commit: 846b589a9
+
+## 0.26.1
+
+- fixes #10267 when executed with helm template - otherwise produces an invalid template. (#10403)
+ commit: 266f9d839
+
+## 0.26.0
+
+Add subPath for jenkins-home mount (#9671)
+commit: a9c76ac9b
+
+## 0.25.1
+
+update readme to indicate the correct image that is used by default (#9915)
+commit: 6aba9631c
+
+## 0.25.0
+
+Add ability to manually set Jenkins URL (#7405)
+commit: a0178fcb4
+
+## 0.24.0
+
+Make AuthorizationStrategy configurable (#9567)
+commit: 06545b226
+
+## 0.23.0
+
+Update Jenkins public chart (#9296)
+commit: 4e5f5918b
+
+## 0.22.0
+
+allow to override jobs (#9004)
+commit: dca9f9ab9
+
+## 0.21.0
+
+Simple implementation of the option to define the ingress path to the jenkins service (#8101)
+commit: 013159609
+
+## 0.20.2
+
+Cosmetic change to remove necessity of changing "appVersion" for every new LTS release (#8866)
+commit: f52af042a
+
+## 0.20.1
+
+Added ExtraPorts to open in the master pod (#7759)
+commit: 78858a2fb
+
+## 0.19.1
+
+Fix component label in NOTES.txt ... (#8300)
+commit: c5494dbfe
+
+## 0.19.0
+
+Kubernetes 1.9 support as well as automatic apiVersion detection (#7988)
+commit: 6853ad364
+
+## 0.18.1
+
+Respect SlaveListenerPort value in config.xml (#7220)
+commit: 0a5ddac35
+
+## 0.18.0
+
+Allow replacement of Jenkins config with configMap. (#7450)
+commit: c766da3de
+
+## 0.17.0
+
+Add option to allow host networking (#7530)
+commit: dc2eeff32
+
+## 0.16.25
+
+add custom jenkins labels to the build agent (#7167)
+commit: 3ecde5dbf
+
+## 0.16.24
+
+Move kubernetes and job plugins to latest versions (#7438)
+commit: 019e39456
+
+## 0.16.23
+
+Add different Deployment Strategies based on persistence (#6132)
+commit: e0a20b0b9
+
+## 0.16.22
+
+avoid lint errors when adding Values.Ingress.Annotations (#7425)
+commit: 99eacc854
+
+## 0.16.21
+
+bump appVersion to reflect new jenkins lts release version 2.121.3 (#7217)
+commit: 296df165d
+
+## 0.16.20
+
+Configure kubernetes plugin for including namespace value (#7164)
+commit: c0dc6cc48
+
+## 0.16.19
+
+make pod retention policy setting configurable (#6962)
+commit: e614c1033
+
+## 0.16.18
+
+Update plugins version (#6988)
+commit: bf8180018
+
+## 0.16.17
+
+Add Master.AdminPassword in README (#6987)
+commit: 13e754ad7
+
+## 0.16.16
+
+Added jenkins location configuration (#6573)
+commit: 79de7026c
+
+## 0.16.15
+
+use generic env var, not oracle specific env var (#6116)
+commit: 6084ab4a4
+
+## 0.16.14
+
+Allow to specify resource requests and limits on initContainers (#6723)
+commit: 942a33b1a
+
+## 0.16.13
+
+Added support for NodePort service type for jenkens agent svc (#6571)
+commit: 89a213c2b
+
+## 0.16.12
+
+Added ability to configure multiple LoadBalancerSourceRanges (#6243)
+commit: 01604ddbc
+
+## 0.16.11
+
+Removing ContainerPort configuration as at the moment it does not work when you change this setting (#6411)
+commit: e1c0468bd
+
+## 0.16.9
+
+Fix jobs parsing for configmap by adding toYaml to jobs.yaml template (#3747)
+commit: b2542a123
+
+## 0.16.8
+
+add jenkinsuriprefix in healthprobes (#5737)
+commit: 435d7a7b9
+
+## 0.16.7
+
+Added the ability to switch from ClusterRoleBinding to RoleBinding. (#6190)
+commit: dde03ede0
+
+## 0.16.6
+
+Make jenkins master pod security context optional (#6122)
+commit: 63653fd59
+
+## 0.16.5
+
+Rework resources requests and limits (#6077) (#6077)
+commit: e738f99d0
+
+## 0.16.4
+
+Add jenkins master pod annotations (#6313)
+commit: 5e7325721
+
+## 0.16.3
+
+Split Jenkins readiness and liveness probe periods (#5704)
+commit: fc6100c38
+
+## 0.16.1
+
+fix typo in jenkins README (#5228)
+commit: 3cd3f4b8b
+
+## 0.16.0
+
+Inherit existing plugins from Jenkins image (#5409)
+commit: fd93bff82
+
+## 0.15.1
+
+Allow NetworkPolicy.ApiVersion and Master.Ingress.ApiVersion to Differ (#5103)
+commit: 78ee4ba15
+
+## 0.15.0
+
+Secure Defaults (#5026)
+commit: 0fe90b520
+
+## 0.14.6
+
+Wait for up to 2 minutes before failing liveness check (#5161)
+commit: 2cd3fc481
+
+## 0.14.5
+
+correct ImageTag setting (#4371)
+commit: 8ea04174d
+
+## 0.14.4
+
+Update jenkins/README.md (#4559)
+commit: d4e6352dd
+
+## 0.14.3
+
+Bump appVersion (#4177)
+commit: 605d3d441
+
+## 0.14.2
+
+Master.InitContainerEnv: Init Container Env Vars (#3495)
+commit: c64abe27d
+
+## 0.14.1
+
+Allow more configuration of Jenkins agent service (#4028)
+commit: fc82f39b2
+
+## 0.14.0
+
+Add affinity settings (#3839)
+commit: 64e82fa6a
+
+## 0.13.5
+
+bump test timeouts (#3886)
+commit: cd05dd99c
+
+## 0.13.4
+
+Add OWNERS to jenkins chart (#3881)
+commit: 1c106b9c8
+
+## 0.13.3
+
+Add fullnameOverride support (#3705)
+commit: ec8080839
+
+## 0.13.2
+
+Update README.md (#3638)
+commit: f6d274c37
+
+## 0.13.1
+
+Lower initial healthcheck delay (#3463)
+commit: 9b99db67c
+
+## 0.13.0
+
+Provision credentials.xml, secrets files and jobs (#3316)
+commit: d305c5961
+
+## 0.12.1
+
+fix the default value for nodeUsageMode. (#3299)
+commit: b68d19516
+
+## 0.12.0
+
+Recreate pods when CustomConfigMap is true and there are changes to the ConfigMap (which is how the vanilla chart works) (#3181)
+commit: 86d29f804
+
+## 0.11.1
+
+Optionally adds liveness and readiness probes to jenkins (#3245)
+commit: 8b9aa73ee
+
+## 0.11.0
+
+Feature/run jenkins as non root user (#2899)
+commit: 8918f4175
+
+## 0.10.3
+
+template the version to keep them synced (#3084)
+commit: 35e7fa49a
+
+## 0.10.2
+
+Update Chart.yaml
+commit: e3e617a0b
+
+## 0.10.1
+
+Merge branch 'master' into jenkins-test-timeout
+commit: 9a230a6b1
+
+Double retry count for Jenkins test
+commit: 129c8e824
+
+Jenkins: Update README | Master.ServiceAnnotations (#2757)
+commit: 6571810bc
+
+## 0.10.0
+
+Update Jenkins images and plugins (#2496)
+commit: 2e2622682
+
+## 0.9.4
+
+Updating to remove the `.lock` directory as well (#2747)
+commit: 6e676808f
+
+## 0.9.3
+
+Use variable for service port when testing (#2666)
+commit: d044f99be
+
+## 0.9.2
+
+Review jenkins networkpolicy docs (#2618)
+commit: 49911e458
+
+Add image pull secrets to jenkins templates (#1389)
+commit: 4dfae21fd
+
+## 0.9.1
+
+Added persistent volume claim annotations (#2619)
+commit: ac9e5306e
+
+Fix failing CI lint (#2758)
+commit: 26f709f0e
+
+## 0.9.0
+
+namespace defined templates with chart name (#2140)
+commit: 408ae0b3f
+
+## 0.8.9
+
+added useSecurity and adminUser to params (#1903)
+commit: 39d2a03cd
+
+Use storageClassName for jenkins. (#1997)
+commit: 802f6449b
+
+## 0.8.8
+
+Remove old plugin locks before installing plugins (#1746)
+commit: 6cd7b8ff4
+
+promote initContainrs to podspec (#1740)
+commit: fecc804fc
+
+## 0.8.7
+
+add optional LoadBalancerIP option. (#1568)
+commit: d39f11408
+
+## 0.8.6
+
+Fix bad key in values.yaml (#1633)
+commit: dc27e5af3
+
+## 0.8.5
+
+Update Jenkins to support node selectors for agents. (#1532)
+commit: 4af5810ff
+
+## 0.8.4
+
+Add support for supplying JENKINS_OPTS and/or uri prefix (#1405)
+commit: 6a331901a
+
+## 0.8.3
+
+Add serviceAccountName to deployment (#1477)
+commit: 0dc349b44
+
+## 0.8.2
+
+Remove path from ingress specification to allow other paths (#1599)
+commit: e727f6b32
+
+Update git plugin to 3.4.0 for CVE-2017-1000084 (#1505)
+commit: 03482f995
+
+## 0.8.1
+
+Use consistent whitespace in template placeholders (#1437)
+commit: 912f50c71
+
+add configurable service annotations #1234 (#1244)
+commit: 286861ca8
+
+## 0.8.0
+
+Jenkins v0.8.0 (#1385)
+commit: 0009a2393
+
+## 0.7.4
+
+Use imageTag as version in config map (#1333)
+commit: e8bb6ebb4
+
+## 0.7.3
+
+Add NetworkPolicy to Jenkins (#1228)
+commit: 572b36c6d
+
+## 0.7.2
+
+- Workflow plugin pin (#1178)
+ commit: ac3a0c7bc
+
+## 0.7.1
+
+copy over plugins.txt in case of update (#1222)
+commit: 75b5b1174
+
+## 0.7.0
+
+add jmx option (#964)
+commit: 6ae8d1945
+
+## 0.6.4
+
+update jenkins to latest LTS 2.46.3 (#1182)
+commit: ad90b4c27
+
+## 0.6.3
+
+Update chart maints to gh u/n (#1107)
+commit: f357b77ed
+
+## 0.6.2
+
+Add Agent.Privileged option (#957)
+commit: 2cf4aced2
+
+## 0.6.1
+
+Upgrade jenkins to 2.46.2 (#971)
+commit: 41bd742b4
+
+## 0.6.0
+
+Smoke test for Jenkins Chart (#944)
+commit: 110441054
+
+## 0.5.1
+
+removed extra space from hardcoded password (#925)
+commit: 85a9b9123
+
+## 0.5.0
+
+move config to init-container allowing use of upstream containers (#921)
+commit: 1803c3d33
+
+## 0.4.1
+
+add ability to toggle jnlp-agent podTemplate generation (#918)
+commit: accd53203
+
+## 0.4.0
+
+Jenkins add script approval (#916)
+commit: c1746656e
+
+## 0.3.1
+
+Update Jenkins to Latest LTS fixes #731 (#733)
+commit: e9a3aed8b
+
+## 0.3.0
+
+Added option to add Jenkins init scripts (#617)
+commit: b889623d0
+
+## 0.2.0
+
+Add existing PVC (#716)
+commit: 05271f145
+
+## 0.1.15
+
+use Master.ServicePort in config.xml (#769)
+commit: f351f4b16
+
+## 0.1.14
+
+Added option to disable security on master node (#403)
+commit: 3a6113d18
+
+## 0.1.13
+
+Added: extra mount points support for jenkins master (#474)
+commit: fab0f7eb1
+
+## 0.1.12
+
+fix storageclass config typo (#548)
+commit: 6fc0ff242
+
+## 0.1.10
+
+Changed default value of Kubernetes Cloud name to match one in kubernetes plugin (#404)
+commit: 68351304a
+
+Add support for overriding the Jenkins ConfigMap (#524)
+commit: f97ca53b1
+
+## 0.1.9
+
+Added jenkins-master ingress support (#402)
+commit: d76a09588
+
+## 0.1.8
+
+Change description (#553)
+commit: 91f5c24e1
+
+Removed default Persistence.StorageClass: generic (#530)
+commit: c87494c10
+
+Update to the recommended pvc patterns. (#448)
+commit: a7fc595aa
+
+Remove helm.sh/created annotations (#505)
+commit: f380da2fb
+
+## 0.1.7
+
+add support for explicit NodePort on jenkins chart (#342)
+commit: f63c188da
+
+Add configurable loadBalancerSourceRanges for jenkins chart (#360)
+commit: 44007c50e
+
+Update Jenkins version to current LTS (2.19.4) and Kubernetes Plugin to 0.10 (#341)
+commit: 6c8678167
+
+## 0.1.6
+
+Add imagePullPolicy to init container (#295)
+commit: 103ee1952
+
+## 0.1.5
+
+bump chart version with PVC metadata label additions
+commit: 4aa9cf5b1
+
+## 0.1.4
+
+removed `*` from `jenkins/templates/NOTES.txt`
+commit: 76212230b
+
+apply standard metadata labels to PVC's
+commit: 58b730836
+
+specify namespace in `kubectl get svc` commands in NOTES.txt
+commit: 7d3287e81
+
+Update Jenkins version to current LTS (#194)
+commit: 2c0404049
+
+## 0.1.1
+
+escape fixed
+commit: 2026e1d15
+
+.status.loadBalancer.ingress[0].ip is empty in AWS
+commit: 1810e37f4
+
+.status.loadBalancer.ingress[0].ip is empty in AWS
+commit: 3cbd3ced6
+
+Remove 'Getting Started:' from various NOTES.txt. (#181)
+commit: 2f63fd524
+
+docs(\*): update READMEs to reference chart repos (#119)
+commit: c7d1bff05
+
+## 0.1.0
+
+Move first batch of PVC charts to stable
+commit: d745f4879
diff --git a/charts/jenkins/Chart.yaml b/charts/jenkins/Chart.yaml
new file mode 100644
index 0000000..2a42c71
--- /dev/null
+++ b/charts/jenkins/Chart.yaml
@@ -0,0 +1,49 @@
+annotations:
+ artifacthub.io/category: integration-delivery
+ artifacthub.io/changes: |
+ - Fix Prometheus controller name.
+ artifacthub.io/images: |
+ - name: jenkins
+ image: docker.io/jenkins/jenkins:2.440.2-jdk17
+ - name: k8s-sidecar
+ image: docker.io/kiwigrid/k8s-sidecar:1.26.1
+ - name: inbound-agent
+ image: jenkins/inbound-agent:3206.vb_15dcf73f6a_9-3
+ artifacthub.io/license: Apache-2.0
+ artifacthub.io/links: |
+ - name: Chart Source
+ url: https://github.com/jenkinsci/helm-charts/tree/main/charts/jenkins
+ - name: Jenkins
+ url: https://www.jenkins.io/
+ - name: support
+ url: https://github.com/jenkinsci/helm-charts/issues
+apiVersion: v2
+appVersion: 2.440.2
+description: 'Jenkins - Build great things at any scale! As the leading open source
+ automation server, Jenkins provides over 1800 plugins to support building, deploying
+ and automating any project. '
+home: https://www.jenkins.io/
+icon: https://get.jenkins.io/art/jenkins-logo/logo.svg
+keywords:
+- jenkins
+- ci
+- devops
+maintainers:
+- email: maor.friedman@redhat.com
+ name: maorfr
+- email: mail@torstenwalter.de
+ name: torstenwalter
+- email: garridomota@gmail.com
+ name: mogaal
+- email: wmcdona89@gmail.com
+ name: wmcdona89
+- email: timjacomb1@gmail.com
+ name: timja
+name: jenkins
+sources:
+- https://github.com/jenkinsci/jenkins
+- https://github.com/jenkinsci/docker-inbound-agent
+- https://github.com/maorfr/kube-tasks
+- https://github.com/jenkinsci/configuration-as-code-plugin
+type: application
+version: 5.1.5
diff --git a/charts/jenkins/README.md b/charts/jenkins/README.md
new file mode 100644
index 0000000..4ddd1fa
--- /dev/null
+++ b/charts/jenkins/README.md
@@ -0,0 +1,706 @@
+# Jenkins
+
+[](https://artifacthub.io/packages/helm/jenkinsci/jenkins)
+[](https://opensource.org/licenses/Apache-2.0)
+[](https://github.com/jenkinsci/helm-charts/releases)
+[](https://app.gitter.im/#/room/#jenkins-ci:matrix.org)
+
+[Jenkins](https://www.jenkins.io/) is the leading open source automation server, Jenkins provides over 1800 plugins to support building, deploying and automating any project.
+
+This chart installs a Jenkins server which spawns agents on [Kubernetes](http://kubernetes.io) utilizing the [Jenkins Kubernetes plugin](https://plugins.jenkins.io/kubernetes/).
+
+Inspired by the awesome work of [Carlos Sanchez](https://github.com/carlossg).
+
+## Get Repository Info
+
+```console
+helm repo add jenkins https://charts.jenkins.io
+helm repo update
+```
+
+_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+
+## Install Chart
+
+```console
+# Helm 3
+$ helm install [RELEASE_NAME] jenkins/jenkins [flags]
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+# Helm 3
+$ helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrade Chart
+
+```console
+# Helm 3
+$ helm upgrade [RELEASE_NAME] jenkins/jenkins [flags]
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+Visit the chart's [CHANGELOG](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/CHANGELOG.md) to view the chart's release history.
+For migration between major version check [migration guide](#migration-guide).
+
+## Building weekly releases
+
+The default charts target Long-Term-Support (LTS) releases of Jenkins.
+To use other versions the easiest way is to update the image tag to the version you want.
+You can also rebuild the chart if you want the `appVersion` field to match.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
+To see all configurable options with detailed comments, visit the chart's [values.yaml](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/values.yaml), or run these configuration commands:
+
+```console
+# Helm 3
+$ helm show values jenkins/jenkins
+```
+
+For a summary of all configurable options, see [VALUES_SUMMARY.md](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/VALUES_SUMMARY.md).
+
+### Configure Security Realm and Authorization Strategy
+
+This chart configures a `securityRealm` and `authorizationStrategy` as shown below:
+
+```yaml
+controller:
+ JCasC:
+ securityRealm: |-
+ local:
+ allowsSignup: false
+ enableCaptcha: false
+ users:
+ - id: "${chart-admin-username}"
+ name: "Jenkins Admin"
+ password: "${chart-admin-password}"
+ authorizationStrategy: |-
+ loggedInUsersCanDoAnything:
+ allowAnonymousRead: false
+```
+
+With the configuration above there is only a single user.
+This is fine for getting started quickly, but it needs to be adjusted for any serious environment.
+
+So you should adjust this to suit your needs.
+That could be using LDAP / OIDC / .. as authorization strategy and use globalMatrix as authorization strategy to configure more fine-grained permissions.
+
+### Consider using a custom image
+
+This chart allows the user to specify plugins which should be installed. However, for production use cases one should consider to build a custom Jenkins image which has all required plugins pre-installed.
+This way you can be sure which plugins Jenkins is using when starting up and you avoid trouble in case of connectivity issues to the Jenkins update site.
+
+The [docker repository](https://github.com/jenkinsci/docker) for the Jenkins image contains [documentation](https://github.com/jenkinsci/docker#preinstalling-plugins) how to do it.
+
+Here is an example how that can be done:
+
+```Dockerfile
+FROM jenkins/jenkins:lts
+RUN jenkins-plugin-cli --plugins kubernetes workflow-aggregator git configuration-as-code
+```
+
+NOTE: If you want a reproducible build then you should specify a non-floating tag for the image `jenkins/jenkins:2.249.3` and specify plugin versions.
+
+Once you built the image and pushed it to your registry you can specify it in your values file like this:
+
+```yaml
+controller:
+ image: "registry/my-jenkins"
+ tag: "v1.2.3"
+ installPlugins: false
+```
+
+Notice: `installPlugins` is set to false to disable plugin download. In this case, the image `registry/my-jenkins:v1.2.3` must have the plugins specified as default value for [the `controller.installPlugins` directive](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/VALUES_SUMMARY.md#jenkins-plugins) to ensure that the configuration side-car system works as expected.
+
+In case you are using a private registry you can use 'imagePullSecretName' to specify the name of the secret to use when pulling the image:
+
+```yaml
+controller:
+ image: "registry/my-jenkins"
+ tag: "v1.2.3"
+ imagePullSecretName: registry-secret
+ installPlugins: false
+```
+
+### External URL Configuration
+
+If you are using the ingress definitions provided by this chart via the `controller.ingress` block the configured hostname will be the ingress hostname starting with `https://` or `http://` depending on the `tls` configuration.
+The Protocol can be overwritten by specifying `controller.jenkinsUrlProtocol`.
+
+If you are not using the provided ingress you can specify `controller.jenkinsUrl` to change the URL definition.
+
+### Configuration as Code
+
+Jenkins Configuration as Code (JCasC) is now a standard component in the Jenkins project.
+To allow JCasC's configuration from the helm values, the plugin [`configuration-as-code`](https://plugins.jenkins.io/configuration-as-code/) must be installed in the Jenkins Controller's Docker image (which is the case by default as specified by the [default value of the directive `controller.installPlugins`](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/VALUES_SUMMARY.md#jenkins-plugins)).
+
+JCasc configuration is passed through Helm values under the key `controller.JCasC`.
+The section ["Jenkins Configuration as Code (JCasC)" of the page "VALUES_SUMMARY.md"](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/VALUES_SUMMARY.md#jenkins-configuration-as-code-jcasc) lists all the possible directives.
+
+In particular, you may specify custom JCasC scripts by adding sub-key under the `controller.JCasC.configScripts` for each configuration area where each corresponds to a plugin or section of the UI.
+
+The sub-keys (prior to `|` character) are only labels used to give the section a meaningful name.
+The only restriction is they must conform to RFC 1123 definition of a DNS label, so they may only contain lowercase letters, numbers, and hyphens.
+
+Each key will become the name of a configuration yaml file on the controller in `/var/jenkins_home/casc_configs` (by default) and will be processed by the Configuration as Code Plugin during Jenkins startup.
+
+The lines after each `|` become the content of the configuration yaml file.
+
+The first line after this is a JCasC root element, e.g. jenkins, credentials, etc.
+
+Best reference is the Documentation link here: `https://<jenkins_url>/configuration-as-code`.
+
+The example below sets custom systemMessage:
+
+```yaml
+controller:
+ JCasC:
+ configScripts:
+ welcome-message: |
+ jenkins:
+ systemMessage: Welcome to our CI\CD server.
+```
+
+More complex example that creates ldap settings:
+
+```yaml
+controller:
+ JCasC:
+ configScripts:
+ ldap-settings: |
+ jenkins:
+ securityRealm:
+ ldap:
+ configurations:
+ - server: ldap.acme.com
+ rootDN: dc=acme,dc=uk
+ managerPasswordSecret: ${LDAP_PASSWORD}
+ groupMembershipStrategy:
+ fromUserRecord:
+ attributeName: "memberOf"
+```
+
+Keep in mind that default configuration file already contains some values that you won't be able to override under configScripts section.
+
+For example, you can not configure Jenkins URL and System Admin email address like this because of conflicting configuration error.
+
+Incorrect:
+
+```yaml
+controller:
+ JCasC:
+ configScripts:
+ jenkins-url: |
+ unclassified:
+ location:
+ url: https://example.com/jenkins
+ adminAddress: example@mail.com
+```
+
+Correct:
+
+```yaml
+controller:
+ jenkinsUrl: https://example.com/jenkins
+ jenkinsAdminEmail: example@mail.com
+```
+
+Further JCasC examples can be found [here](https://github.com/jenkinsci/configuration-as-code-plugin/tree/master/demos).
+
+#### Breaking out large Config as Code scripts
+
+Jenkins Config as Code scripts can become quite large, and maintaining all of your scripts within one yaml file can be difficult. The Config as Code plugin itself suggests updating the `CASC_JENKINS_CONFIG` environment variable to be a comma separated list of paths for the plugin to traverse, picking up the yaml files as needed.
+However, under the Jenkins helm chart, this `CASC_JENKINS_CONFIG` value is maintained through the templates. A better solution is to split your `controller.JCasC.configScripts` into separate values files, and provide each file during the helm install.
+
+For example, you can have a values file (e.g values_main.yaml) that defines the values described in the `VALUES_SUMMARY.md` for your Jenkins configuration:
+
+```yaml
+jenkins:
+ controller:
+ jenkinsUrlProtocol: https
+ installPlugins: false
+ ...
+```
+
+In a second file (e.g values_jenkins_casc.yaml), you can define a section of your config scripts:
+
+```yaml
+jenkins:
+ controller:
+ JCasC:
+ configScripts:
+ jenkinsCasc: |
+ jenkins:
+ disableRememberMe: false
+ mode: NORMAL
+ ...
+```
+
+And keep extending your config scripts by creating more files (so not all config scripts are located in one yaml file for better maintenance):
+
+values_jenkins_unclassified.yaml
+
+```yaml
+jenkins:
+ controller:
+ JCasC:
+ configScripts:
+ unclassifiedCasc: |
+ unclassified:
+ ...
+```
+
+When installing, you provide all relevant yaml files (e.g `helm install -f values_main.yaml -f values_jenkins_casc.yaml -f values_jenkins_unclassified.yaml ...`). Instead of updating the `CASC_JENKINS_CONFIG` environment variable to include multiple paths, multiple CasC yaml files will be created in the same path `/var/jenkins_home/casc_configs`.
+
+#### Config as Code With or Without Auto-Reload
+
+Config as Code changes (to `controller.JCasC.configScripts`) can either force a new pod to be created and only be applied at next startup, or can be auto-reloaded on-the-fly.
+If you set `controller.sidecars.configAutoReload.enabled` to `true`, a second, auxiliary container will be installed into the Jenkins controller pod, known as a "sidecar".
+This watches for changes to configScripts, copies the content onto the Jenkins file-system and issues a POST to `http://<jenkins_url>/reload-configuration-as-code` with a pre-shared key.
+You can monitor this sidecar's logs using command `kubectl logs <controller_pod> -c config-reload -f`.
+If you want to enable auto-reload then you also need to configure rbac as the container which triggers the reload needs to watch the config maps:
+
+```yaml
+controller:
+ sidecars:
+ configAutoReload:
+ enabled: true
+rbac:
+ create: true
+```
+
+### Allow Limited HTML Markup in User-Submitted Text
+
+Some third-party systems (e.g. GitHub) use HTML-formatted data in their payload sent to a Jenkins webhook (e.g. URL of a pull-request being built).
+To display such data as processed HTML instead of raw text set `controller.enableRawHtmlMarkupFormatter` to true.
+This option requires installation of the [OWASP Markup Formatter Plugin (antisamy-markup-formatter)](https://plugins.jenkins.io/antisamy-markup-formatter/).
+This plugin is **not** installed by default but may be added to `controller.additionalPlugins`.
+
+### Change max connections to Kubernetes API
+When using agents with containers other than JNLP, the Kubernetes plugin will communicate with those containers using the Kubernetes API. This changes the maximum number of concurrent connections
+```yaml
+agent:
+ maxRequestsPerHostStr: "32"
+```
+This will change the configuration of the kubernetes "cloud" (as called by jenkins) that is created automatically as part of this helm chart.
+
+### Change container cleanup timeout API
+For tasks that use very large images, this timeout can be increased to avoid early termination of the task while the Kubernetes pod is still deploying.
+```yaml
+agent:
+ retentionTimeout: "32"
+```
+This will change the configuration of the kubernetes "cloud" (as called by jenkins) that is created automatically as part of this helm chart.
+
+### Change seconds to wait for pod to be running
+This will change how long Jenkins will wait (seconds) for pod to be in running state.
+```yaml
+agent:
+ waitForPodSec: "32"
+```
+This will change the configuration of the kubernetes "cloud" (as called by jenkins) that is created automatically as part of this helm chart.
+
+### Mounting Volumes into Agent Pods
+
+Your Jenkins Agents will run as pods, and it's possible to inject volumes where needed:
+
+```yaml
+agent:
+ volumes:
+ - type: Secret
+ secretName: jenkins-mysecrets
+ mountPath: /var/run/secrets/jenkins-mysecrets
+```
+
+The supported volume types are: `ConfigMap`, `EmptyDir`, `HostPath`, `Nfs`, `PVC`, `Secret`.
+Each type supports a different set of configurable attributes, defined by [the corresponding Java class](https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes).
+
+### NetworkPolicy
+
+To make use of the NetworkPolicy resources created by default, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin).
+
+[Install](#install-chart) helm chart with network policy enabled by setting `networkPolicy.enabled` to `true`.
+
+You can use `controller.networkPolicy.internalAgents` and `controller.networkPolicy.externalAgents` stanzas for fine-grained controls over where internal/external agents can connect from.
+Internal ones are allowed based on pod labels and (optionally) namespaces, and external ones are allowed based on IP ranges.
+
+### Script approval list
+
+`controller.scriptApproval` allows passing function signatures that will be allowed in pipelines.
+Example:
+
+```yaml
+controller:
+ scriptApproval:
+ - "method java.util.Base64$Decoder decode java.lang.String"
+ - "new java.lang.String byte[]"
+ - "staticMethod java.util.Base64 getDecoder"
+```
+
+### Custom Labels
+
+`controller.serviceLabels` can be used to add custom labels in `jenkins-controller-svc.yaml`.
+For example:
+
+```yaml
+ServiceLabels:
+ expose: true
+```
+
+### Persistence
+
+The Jenkins image stores persistence under `/var/jenkins_home` path of the container.
+A dynamically managed Persistent Volume Claim is used to keep the data across deployments, by default.
+This is known to work in GCE, AWS, and minikube. Alternatively, a previously configured Persistent Volume Claim can be used.
+
+It is possible to mount several volumes using `persistence.volumes` and `persistence.mounts` parameters.
+See additional `persistence` values using [configuration commands](#configuration).
+
+#### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. [Install](#install-chart) the chart, setting `persistence.existingClaim` to `PVC_NAME`
+
+#### Long Volume Attach/Mount Times
+
+Certain volume type and filesystem format combinations may experience long
+attach/mount times, [10 or more minutes][K8S_VOLUME_TIMEOUT], when using
+`fsGroup`. This issue may result in the following entries in the pod's event
+history:
+
+```console
+Warning FailedMount 38m kubelet, aks-default-41587790-2 Unable to attach or mount volumes: unmounted volumes=[jenkins-home], unattached volumes=[plugins plugin-dir jenkins-token-rmq2g sc-config-volume tmp jenkins-home jenkins-config secrets-dir]: timed out waiting for the condition
+```
+
+In these cases, experiment with replacing `fsGroup` with
+`supplementalGroups` in the pod's `securityContext`. This can be achieved by
+setting the `controller.podSecurityContextOverride` Helm chart value to
+something like:
+
+```yaml
+controller:
+ podSecurityContextOverride:
+ runAsNonRoot: true
+ runAsUser: 1000
+ supplementalGroups: [1000]
+```
+
+This issue has been reported on [azureDisk with ext4][K8S_VOLUME_TIMEOUT] and
+on [Alibaba cloud][K8S_VOLUME_TIMEOUT_ALIBABA].
+
+[K8S_VOLUME_TIMEOUT]: https://github.com/kubernetes/kubernetes/issues/67014
+[K8S_VOLUME_TIMEOUT_ALIBABA]: https://github.com/kubernetes/kubernetes/issues/67014#issuecomment-698770511
+
+#### Storage Class
+
+It is possible to define which storage class to use, by setting `persistence.storageClass` to `[customStorageClass]`.
+If set to a dash (`-`), dynamic provisioning is disabled.
+If the storage class is set to null or left undefined (`""`), the default provisioner is used (gp2 on AWS, standard on GKE, AWS & OpenStack).
+
+### Additional Secrets
+
+Additional secrets and Additional Existing Secrets,
+can be mounted into the Jenkins controller through the chart or created using `controller.additionalSecrets` or `controller.additionalExistingSecrets`.
+A common use case might be identity provider credentials if using an external LDAP or OIDC-based identity provider.
+The secret may then be referenced in JCasC configuration (see [JCasC configuration](#configuration-as-code)).
+
+`values.yaml` controller section, referencing mounted secrets:
+```yaml
+controller:
+ # the 'name' and 'keyName' are concatenated with a '-' in between, so for example:
+ # an existing secret "secret-credentials" and a key inside it named "github-password" should be used in Jcasc as ${secret-credentials-github-password}
+ # 'name' and 'keyName' must be lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-',
+ # and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc')
+ # existingSecret existing secret "secret-credentials" and a key inside it named "github-username" should be used in Jcasc as ${github-username}
+ # When using existingSecret no need to specify the keyName under additionalExistingSecrets.
+ existingSecret: secret-credentials
+
+ additionalExistingSecrets:
+ - name: secret-credentials
+ keyName: github-username
+ - name: secret-credentials
+ keyName: github-password
+ - name: secret-credentials
+ keyName: token
+
+ additionalSecrets:
+ - name: client_id
+ value: abc123
+ - name: client_secret
+ value: xyz999
+ JCasC:
+ securityRealm: |
+ oic:
+ clientId: ${client_id}
+ clientSecret: ${client_secret}
+ ...
+ configScripts:
+ jenkins-casc-configs: |
+ credentials:
+ system:
+ domainCredentials:
+ - credentials:
+ - string:
+ description: "github access token"
+ id: "github_app_token"
+ scope: GLOBAL
+ secret: ${secret-credentials-token}
+ - usernamePassword:
+ description: "github access username password"
+ id: "github_username_pass"
+ password: ${secret-credentials-github-password}
+ scope: GLOBAL
+ username: ${secret-credentials-github-username}
+```
+
+For more information, see [JCasC documentation](https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/docs/features/secrets.adoc#kubernetes-secrets).
+
+### Secret Claims from HashiCorp Vault
+
+It's possible for this chart to generate `SecretClaim` resources in order to automatically create and maintain Kubernetes `Secrets` from HashiCorp [Vault](https://www.vaultproject.io/) via [`kube-vault-controller`](https://github.com/roboll/kube-vault-controller)
+
+These `Secrets` can then be referenced in the same manner as Additional Secrets above.
+
+This can be achieved by defining required Secret Claims within `controller.secretClaims`, as follows:
+```yaml
+controller:
+ secretClaims:
+ - name: jenkins-secret
+ path: secret/path
+ - name: jenkins-short-ttl
+ path: secret/short-ttl-path
+ renew: 60
+```
+
+### RBAC
+
+RBAC is enabled by default. If you want to disable it you will need to set `rbac.create` to `false`.
+
+### Adding Custom Pod Templates
+
+It is possible to add custom pod templates for the default configured kubernetes cloud.
+Add a key under `agent.podTemplates` for each pod template. Each key (prior to `|` character) is just a label, and can be any value.
+Keys are only used to give the pod template a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label characters: lowercase letters, numbers, and hyphens. Each pod template can contain multiple containers.
+There's no need to add the _jnlp_ container since the kubernetes plugin will automatically inject it into the pod.
+For this pod templates configuration to be loaded the following values must be set:
+
+```yaml
+controller.JCasC.defaultConfig: true
+```
+
+The example below creates a python pod template in the kubernetes cloud:
+
+```yaml
+agent:
+ podTemplates:
+ python: |
+ - name: python
+ label: jenkins-python
+ serviceAccount: jenkins
+ containers:
+ - name: python
+ image: python:3
+ command: "/bin/sh -c"
+ args: "cat"
+ ttyEnabled: true
+ privileged: true
+ resourceRequestCpu: "400m"
+ resourceRequestMemory: "512Mi"
+ resourceLimitCpu: "1"
+ resourceLimitMemory: "1024Mi"
+```
+
+Best reference is `https://<jenkins_url>/configuration-as-code/reference#Cloud-kubernetes`.
+
+### Adding Pod Templates Using additionalAgents
+
+`additionalAgents` may be used to configure additional kubernetes pod templates.
+Each additional agent corresponds to `agent` in terms of the configurable values and inherits all values from `agent` so you only need to specify values which differ.
+For example:
+
+```yaml
+agent:
+ podName: default
+ customJenkinsLabels: default
+ # set resources for additional agents to inherit
+ resources:
+ limits:
+ cpu: "1"
+ memory: "2048Mi"
+
+additionalAgents:
+ maven:
+ podName: maven
+ customJenkinsLabels: maven
+ # An example of overriding the jnlp container
+ # sideContainerName: jnlp
+ image: jenkins/jnlp-agent-maven
+ tag: latest
+ python:
+ podName: python
+ customJenkinsLabels: python
+ sideContainerName: python
+ image: python
+ tag: "3"
+ command: "/bin/sh -c"
+ args: "cat"
+ TTYEnabled: true
+```
+
+### Ingress Configuration
+
+This chart provides ingress resources configurable via the `controller.ingress` block.
+
+The simplest configuration looks like the following:
+
+```yaml
+controller:
+ ingress:
+ enabled: true
+ paths: []
+ apiVersion: "extensions/v1beta1"
+ hostName: jenkins.example.com
+```
+
+This snippet configures an ingress rule for exposing jenkins at `jenkins.example.com`
+
+You can define labels and annotations via `controller.ingress.labels` and `controller.ingress.annotations` respectively.
+Additionally, you can configure the ingress tls via `controller.ingress.tls`.
+By default, this ingress rule exposes all paths.
+If needed this can be overwritten by specifying the wanted paths in `controller.ingress.paths`
+
+If you want to configure a secondary ingress e.g. you don't want the jenkins instance exposed but still want to receive webhooks you can configure `controller.secondaryingress`.
+The secondaryingress doesn't expose anything by default and has to be configured via `controller.secondaryingress.paths`:
+
+```yaml
+controller:
+ ingress:
+ enabled: true
+ apiVersion: "extensions/v1beta1"
+ hostName: "jenkins.internal.example.com"
+ annotations:
+ kubernetes.io/ingress.class: "internal"
+ secondaryingress:
+ enabled: true
+ apiVersion: "extensions/v1beta1"
+ hostName: "jenkins-scm.example.com"
+ annotations:
+ kubernetes.io/ingress.class: "public"
+ paths:
+ - /github-webhook
+```
+
+## Prometheus Metrics
+
+If you want to expose Prometheus metrics you need to install the [Jenkins Prometheus Metrics Plugin](https://github.com/jenkinsci/prometheus-plugin).
+It will expose an endpoint (default `/prometheus`) with metrics where a Prometheus Server can scrape.
+
+If you have implemented [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), you can set `controller.prometheus.enabled` to `true` to configure a `ServiceMonitor` and `PrometheusRule`.
+If you want to further adjust alerting rules you can do so by configuring `controller.prometheus.alertingrules`
+
+If you have implemented Prometheus without using the operator, you can leave `controller.prometheus.enabled` set to `false`.
+
+### Running Behind a Forward Proxy
+
+The controller pod uses an Init Container to install plugins etc. If you are behind a corporate proxy it may be useful to set `controller.initContainerEnv` to add environment variables such as `http_proxy`, so that these can be downloaded.
+
+Additionally, you may want to add env vars for the init container, the Jenkins container, and the JVM (`controller.javaOpts`):
+
+```yaml
+controller:
+ initContainerEnv:
+ - name: http_proxy
+ value: "http://192.168.64.1:3128"
+ - name: https_proxy
+ value: "http://192.168.64.1:3128"
+ - name: no_proxy
+ value: ""
+ - name: JAVA_OPTS
+ value: "-Dhttps.proxyHost=proxy_host_name_without_protocol -Dhttps.proxyPort=3128"
+ containerEnv:
+ - name: http_proxy
+ value: "http://192.168.64.1:3128"
+ - name: https_proxy
+ value: "http://192.168.64.1:3128"
+ javaOpts: >-
+ -Dhttp.proxyHost=192.168.64.1
+ -Dhttp.proxyPort=3128
+ -Dhttps.proxyHost=192.168.64.1
+ -Dhttps.proxyPort=3128
+```
+
+### HTTPS Keystore Configuration
+
+[This configuration](https://wiki.jenkins.io/pages/viewpage.action?pageId=135468777) enables jenkins to use keystore in order to serve HTTPS.
+Here is the [value file section](https://wiki.jenkins.io/pages/viewpage.action?pageId=135468777#RunningJenkinswithnativeSSL/HTTPS-ConfigureJenkinstouseHTTPSandtheJKSkeystore) related to keystore configuration.
+Keystore itself should be placed in front of `jenkinsKeyStoreBase64Encoded` key and in base64 encoded format. To achieve that after having `keystore.jks` file simply do this: `cat keystore.jks | base64` and paste the output in front of `jenkinsKeyStoreBase64Encoded`.
+After enabling `httpsKeyStore.enable` make sure that `httpPort` and `targetPort` are not the same, as `targetPort` will serve HTTPS.
+Do not set `controller.httpsKeyStore.httpPort` to `-1` because it will cause the readiness and liveness probes to fail.
+If you already have a kubernetes secret that has keystore and its password you can specify its name in front of `jenkinsHttpsJksSecretName`, You need to remember that your secret should have proper data key names `jenkins-jks-file` (or override the key name using `jenkinsHttpsJksSecretKey`)
+and `https-jks-password` (or override the key name using `jenkinsHttpsJksPasswordSecretKey`; additionally you can make it get the password from a different secret using `jenkinsHttpsJksPasswordSecretName`). Example:
+
+```yaml
+controller:
+ httpsKeyStore:
+ enable: true
+ jenkinsHttpsJksSecretName: ''
+ httpPort: 8081
+ path: "/var/jenkins_keystore"
+ fileName: "keystore.jks"
+ password: "changeit"
+ jenkinsKeyStoreBase64Encoded: ''
+```
+### AWS Security Group Policies
+
+To create SecurityGroupPolicies set `awsSecurityGroupPolicies.enabled` to true and add your policies. Each policy requires a `name`, array of `securityGroupIds` and a `podSelector`. Example:
+
+```yaml
+awsSecurityGroupPolicies:
+ enabled: true
+ policies:
+ - name: "jenkins-controller"
+ securityGroupIds:
+ - sg-123456789
+ podSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values:
+ - jenkins-controller
+```
+
+### Agent Direct Connection
+
+Set `directConnection` to `true` to allow agents to connect directly to a given TCP port without having to negotiate an HTTP(S) connection. This can allow you to have agent connections without an external HTTP(S) port. Example:
+
+```yaml
+agent:
+ jenkinsTunnel: "jenkinsci-agent:50000"
+ directConnection: true
+```
+
+## Migration Guide
+
+### From stable repository
+
+Upgrade an existing release from `stable/jenkins` to `jenkins/jenkins` seamlessly by ensuring you have the latest [repository info](#get-repository-info) and running the [upgrade commands](#upgrade-chart) specifying the `jenkins/jenkins` chart.
+
+### Major Version Upgrades
+
+Chart release versions follow [SemVer](../../CONTRIBUTING.md#versioning), where a MAJOR version change (example `1.0.0` -> `2.0.0`) indicates an incompatible breaking change needing manual actions.
+
+See [UPGRADING.md](./UPGRADING.md) for a list of breaking changes
diff --git a/charts/jenkins/UPGRADING.md b/charts/jenkins/UPGRADING.md
new file mode 100644
index 0000000..41e424d
--- /dev/null
+++ b/charts/jenkins/UPGRADING.md
@@ -0,0 +1,148 @@
+# Upgrade Notes
+
+## To 5.0.0
+- `controller.image`, `controller.tag`, and `controller.tagLabel` have been removed. If you want to overwrite the image you now need to configure any or all of:
+ - `controller.image.registry`
+ - `controller.image.repository`
+ - `controller.image.tag`
+ - `controller.image.tagLabel`
+- `controller.imagePullPolicy` has been removed. If you want to overwrite the pull policy you now need to configure `controller.image.pullPolicy`.
+- `controller.sidecars.configAutoReload.image` has been removed. If you want to overwrite the configAutoReload image you now need to configure any or all of:
+ - `controller.sidecars.configAutoReload.image.registry`
+ - `controller.sidecars.configAutoReload.image.repository`
+ - `controller.sidecars.configAutoReload.image.tag`
+- `controller.sidecars.other` has been renamed to `controller.sidecars.additionalSidecarContainers`.
+- `agent.image` and `agent.tag` have been removed. If you want to overwrite the agent image you now need to configure any or all of:
+ - `agent.image.repository`
+ - `agent.image.tag`
+ - The registry can still be overwritten by `agent.jnlpregistry`
+- `agent.additionalContainers[*].image` has been renamed to `agent.additionalContainers[*].image.repository`
+- `agent.additionalContainers[*].tag` has been renamed to `agent.additionalContainers[*].image.tag`
+- `additionalAgents.*.image` has been renamed to `additionalAgents.*.image.repository`
+- `additionalAgents.*.tag` has been renamed to `additionalAgents.*.image.tag`
+- `additionalClouds.*.additionalAgents.*.image` has been renamed to `additionalClouds.*.additionalAgents.*.image.repository`
+- `additionalClouds.*.additionalAgents.*.tag` has been renamed to `additionalClouds.*.additionalAgents.*.image.tag`
+- `helmtest.bats.image` has been split up to:
+ - `helmtest.bats.image.registry`
+ - `helmtest.bats.image.repository`
+ - `helmtest.bats.image.tag`
+- `controller.adminUsername` and `controller.adminPassword` have been renamed to `controller.admin.username` and `controller.admin.password` respectively
+- `controller.adminSecret` has been renamed to `controller.admin.createSecret`
+- `backup.*` was unmaintained and has thus been removed. See the following page for alternatives: [Kubernetes Backup and Migrations](https://nubenetes.com/kubernetes-backup-migrations/).
+
+## To 4.0.0
+Removes automatic `remotingSecurity` setting when using a container tag older than `2.326` (introduced in [`3.11.7`](./CHANGELOG.md#3117)). If you're using a version older than `2.326`, you should explicitly set `.controller.legacyRemotingSecurityEnabled` to `true`.
+
+## To 3.0.0
+
+* Check `securityRealm` and `authorizationStrategy` and adjust it.
+ Otherwise, your configured users and permissions will be overridden.
+* You need to use helm version 3 as the `Chart.yaml` uses `apiVersion: v2`.
+* All XML configuration options have been removed.
+ In case those are still in use you need to migrate to configuration as code.
+ The upgrade guide for 2.0.0 contains pointers on how to do that.
+* Jenkins is now using a `StatefulSet` instead of a `Deployment`
+* Terminology has been adjusted, which is also reflected in `values.yaml`.
+ The following values from `values.yaml` have been renamed:
+
+ * `master` => `controller`
+ * `master.useSecurity` => `controller.adminSecret`
+ * `master.slaveListenerPort` => `controller.agentListenerPort`
+ * `master.slaveHostPort` => `controller.agentListenerHostPort`
+ * `master.slaveKubernetesNamespace` => `agent.namespace`
+ * `master.slaveDefaultsProviderTemplate` => `agent.defaultsProviderTemplate`
+ * `master.slaveJenkinsUrl` => `agent.jenkinsUrl`
+ * `master.slaveJenkinsTunnel` => `agent.jenkinsTunnel`
+ * `master.slaveConnectTimeout` => `agent.kubernetesConnectTimeout`
+ * `master.slaveReadTimeout` => `agent.kubernetesReadTimeout`
+ * `master.slaveListenerServiceAnnotations` => `controller.agentListenerServiceAnnotations`
+ * `master.slaveListenerServiceType` => `controller.agentListenerServiceType`
+ * `master.slaveListenerLoadBalancerIP` => `controller.agentListenerLoadBalancerIP`
+ * `agent.slaveConnectTimeout` => `agent.connectTimeout`
+* Removed values:
+
+ * `master.imageTag`: use `controller.image` and `controller.tag` instead
+ * `slave.imageTag`: use `agent.image` and `agent.tag` instead
+
+## To 2.0.0
+
+Configuration as Code is now default + container does not run as root anymore.
+
+### Configuration as Code new default
+
+Configuration is done via [Jenkins Configuration as Code Plugin](https://github.com/jenkinsci/configuration-as-code-plugin) by default.
+That means that changes in values which result in a configuration change are always applied.
+In contrast, the XML configuration was only applied during the first start and never altered.
+
+:exclamation::exclamation::exclamation:
+Attention:
+This also means if you manually altered configuration then this will most likely be reset to what was configured by default.
+It also applies to `securityRealm` and `authorizationStrategy` as they are also configured using configuration as code.
+:exclamation::exclamation::exclamation:
+
+### Image does not run as root anymore
+
+It's not recommended to run containers in Kubernetes as `root`.
+
+❗Attention: If you had not configured a different user before then you need to ensure that your image supports the user and group ID configured and also manually change permissions of all files so that Jenkins is still able to use them.
+
+### Summary of updated values
+
+As version 2.0.0 only updates default values and nothing else it's still possible to migrate to this version and opt out of some or all new defaults.
+All you have to do is ensure the old values are set in your installation.
+
+Here we show which values have changed and the previous default values:
+
+```yaml
+controller:
+ runAsUser: 1000 # was unset before
+ fsGroup: 1000 # was unset before
+ JCasC:
+ enabled: true # was false
+ defaultConfig: true # was false
+ sidecars:
+ configAutoReload:
+ enabled: true # was false
+```
+
+### Migration steps
+
+Migration instructions heavily depend on your current setup.
+So think of the list below more as a general guideline of what should be done.
+
+- Ensure that the Jenkins image you are using contains a user with ID 1000 and a group with the same ID.
+ That's the case for the `jenkins/jenkins:lts` image, which the chart uses by default.
+- Make a backup of your existing installation especially the persistent volume
+- Ensure that you have the configuration as code plugin installed
+- Export your current settings via the plugin:
+ `Manage Jenkins` -> `Configuration as Code` -> `Download Configuration`
+- Prepare your values file for the update, e.g., add any additional configuration as code settings that you need.
+ The export taken from above might be a good starting point for this.
+ In addition, the [demos](https://github.com/jenkinsci/configuration-as-code-plugin/tree/master/demos) from the plugin itself are quite useful.
+- Test drive those settings on a separate installation
+- Put Jenkins to Quiet Down mode so that it does not accept new jobs
+ `<JENKINS_URL>/quietDown`
+- Change permissions of all files and folders to the new user and group id:
+
+ ```console
+ kubectl exec -it <jenkins_pod> -c jenkins /bin/bash
+ chown -R 1000:1000 /var/jenkins_home
+ ```
+
+- Update Jenkins
+
+## To 1.0.0
+
+Breaking changes:
+
+- Values have been renamed to follow [helm recommended naming conventions](https://helm.sh/docs/chart_best_practices/#naming-conventions) so that all variables start with a lowercase letter and words are separated with camelcase
+- All resources are now using [helm recommended standard labels](https://helm.sh/docs/chart_best_practices/#standard-labels)
+
+As a result of the label changes also the selectors of the deployment have been updated.
+Those are immutable, so trying an update will cause an error like:
+
+```console
+Error: Deployment.apps "jenkins" is invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/component":"jenkins-controller", "app.kubernetes.io/instance":"jenkins"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
+```
+
+In order to upgrade, [uninstall](./README.md#uninstall-chart) the Jenkins Deployment before upgrading:
diff --git a/charts/jenkins/VALUES.md b/charts/jenkins/VALUES.md
new file mode 100644
index 0000000..a9a4f47
--- /dev/null
+++ b/charts/jenkins/VALUES.md
@@ -0,0 +1,303 @@
+# Jenkins
+
+## Configuration
+
+The following tables list the configurable parameters of the Jenkins chart and their default values.
+
+## Values
+
+| Key | Type | Description | Default |
+|:----|:-----|:---------|:------------|
+| [additionalAgents](./values.yaml#L1138) | object | Configure additional | `{}` |
+| [additionalClouds](./values.yaml#L1163) | object | | `{}` |
+| [agent.TTYEnabled](./values.yaml#L1058) | bool | Allocate pseudo tty to the side container | `false` |
+| [agent.additionalContainers](./values.yaml#L1091) | list | Add additional containers to the agents | `[]` |
+| [agent.alwaysPullImage](./values.yaml#L951) | bool | Always pull agent container image before build | `false` |
+| [agent.annotations](./values.yaml#L1087) | object | Annotations to apply to the pod | `{}` |
+| [agent.args](./values.yaml#L1052) | string | Arguments passed to command to execute | `"${computer.jnlpmac} ${computer.name}"` |
+| [agent.command](./values.yaml#L1050) | string | Command to execute when side container starts | `nil` |
+| [agent.componentName](./values.yaml#L919) | string | | `"jenkins-agent"` |
+| [agent.connectTimeout](./values.yaml#L1085) | int | Timeout in seconds for an agent to be online | `100` |
+| [agent.containerCap](./values.yaml#L1060) | int | Max number of agents to launch | `10` |
+| [agent.customJenkinsLabels](./values.yaml#L916) | list | Append Jenkins labels to the agent | `[]` |
+| [agent.defaultsProviderTemplate](./values.yaml#L882) | string | The name of the pod template to use for providing default values | `""` |
+| [agent.directConnection](./values.yaml#L922) | bool | | `false` |
+| [agent.disableDefaultAgent](./values.yaml#L1109) | bool | Disable the default Jenkins Agent configuration | `false` |
+| [agent.enabled](./values.yaml#L880) | bool | Enable Kubernetes plugin jnlp-agent podTemplate | `true` |
+| [agent.envVars](./values.yaml#L1033) | list | Environment variables for the agent Pod | `[]` |
+| [agent.hostNetworking](./values.yaml#L930) | bool | Enables the agent to use the host network | `false` |
+| [agent.idleMinutes](./values.yaml#L1065) | int | Allows the Pod to remain active for reuse until the configured number of minutes has passed since the last step was executed on it | `0` |
+| [agent.image.repository](./values.yaml#L909) | string | Repository to pull the agent jnlp image from | `"jenkins/inbound-agent"` |
+| [agent.image.tag](./values.yaml#L911) | string | Tag of the image to pull | `"3206.vb_15dcf73f6a_9-3"` |
+| [agent.imagePullSecretName](./values.yaml#L918) | string | Name of the secret to be used to pull the image | `nil` |
+| [agent.jenkinsTunnel](./values.yaml#L890) | string | Overrides the Kubernetes Jenkins tunnel | `nil` |
+| [agent.jenkinsUrl](./values.yaml#L886) | string | Overrides the Kubernetes Jenkins URL | `nil` |
+| [agent.jnlpregistry](./values.yaml#L906) | string | Custom registry used to pull the agent jnlp image from | `nil` |
+| [agent.kubernetesConnectTimeout](./values.yaml#L892) | int | The connection timeout in seconds for connections to Kubernetes API. The minimum value is 5 | `5` |
+| [agent.kubernetesReadTimeout](./values.yaml#L894) | int | The read timeout in seconds for connections to Kubernetes API. The minimum value is 15 | `15` |
+| [agent.livenessProbe](./values.yaml#L941) | object | | `{}` |
+| [agent.maxRequestsPerHostStr](./values.yaml#L896) | string | The maximum concurrent connections to Kubernetes API | `"32"` |
+| [agent.namespace](./values.yaml#L902) | string | Namespace in which the Kubernetes agents should be launched | `nil` |
+| [agent.nodeSelector](./values.yaml#L1044) | object | Node labels for pod assignment | `{}` |
+| [agent.nodeUsageMode](./values.yaml#L914) | string | | `"NORMAL"` |
+| [agent.podLabels](./values.yaml#L904) | object | Custom Pod labels (an object with `label-key: label-value` pairs) | `{}` |
+| [agent.podName](./values.yaml#L1062) | string | Agent Pod base name | `"default"` |
+| [agent.podRetention](./values.yaml#L960) | string | | `"Never"` |
+| [agent.podTemplates](./values.yaml#L1119) | object | Configures extra pod templates for the default kubernetes cloud | `{}` |
+| [agent.privileged](./values.yaml#L924) | bool | Agent privileged container | `false` |
+| [agent.resources](./values.yaml#L932) | object | Resources allocation (Requests and Limits) | `{"limits":{"cpu":"512m","memory":"512Mi"},"requests":{"cpu":"512m","memory":"512Mi"}}` |
+| [agent.restrictedPssSecurityContext](./values.yaml#L957) | bool | Set a restricted securityContext on jnlp containers | `false` |
+| [agent.retentionTimeout](./values.yaml#L898) | int | Time in minutes after which the Kubernetes cloud plugin will clean up an idle worker that has not already terminated | `5` |
+| [agent.runAsGroup](./values.yaml#L928) | string | Configure container group | `nil` |
+| [agent.runAsUser](./values.yaml#L926) | string | Configure container user | `nil` |
+| [agent.secretEnvVars](./values.yaml#L1037) | list | Mount a secret as environment variable | `[]` |
+| [agent.showRawYaml](./values.yaml#L964) | bool | | `true` |
+| [agent.sideContainerName](./values.yaml#L1054) | string | Side container name | `"jnlp"` |
+| [agent.volumes](./values.yaml#L971) | list | Additional volumes | `[]` |
+| [agent.waitForPodSec](./values.yaml#L900) | int | Seconds to wait for pod to be running | `600` |
+| [agent.websocket](./values.yaml#L921) | bool | Enables agent communication via websockets | `false` |
+| [agent.workingDir](./values.yaml#L913) | string | Configure working directory for default agent | `"/home/jenkins/agent"` |
+| [agent.workspaceVolume](./values.yaml#L1006) | object | Workspace volume (defaults to EmptyDir) | `{}` |
+| [agent.yamlMergeStrategy](./values.yaml#L1083) | string | Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates. Possible values: "merge" or "override" | `"override"` |
+| [agent.yamlTemplate](./values.yaml#L1072) | string | The raw yaml of a Pod API Object to merge into the agent spec | `""` |
+| [awsSecurityGroupPolicies.enabled](./values.yaml#L1289) | bool | | `false` |
+| [awsSecurityGroupPolicies.policies[0].name](./values.yaml#L1291) | string | | `""` |
+| [awsSecurityGroupPolicies.policies[0].podSelector](./values.yaml#L1293) | object | | `{}` |
+| [awsSecurityGroupPolicies.policies[0].securityGroupIds](./values.yaml#L1292) | list | | `[]` |
+| [checkDeprecation](./values.yaml#L1286) | bool | Checks if any deprecated values are used | `true` |
+| [clusterZone](./values.yaml#L21) | string | Override the cluster name for FQDN resolving | `"cluster.local"` |
+| [controller.JCasC.authorizationStrategy](./values.yaml#L533) | string | Jenkins Config as Code Authorization Strategy-section | `"loggedInUsersCanDoAnything:\n allowAnonymousRead: false"` |
+| [controller.JCasC.configScripts](./values.yaml#L507) | object | List of Jenkins Config as Code scripts | `{}` |
+| [controller.JCasC.configUrls](./values.yaml#L504) | list | Remote URLs for configuration files. | `[]` |
+| [controller.JCasC.defaultConfig](./values.yaml#L498) | bool | Enables default Jenkins configuration via configuration as code plugin | `true` |
+| [controller.JCasC.overwriteConfiguration](./values.yaml#L502) | bool | Whether Jenkins Config as Code should overwrite any existing configuration | `false` |
+| [controller.JCasC.security](./values.yaml#L514) | object | Jenkins Config as Code security-section | `{"apiToken":{"creationOfLegacyTokenEnabled":false,"tokenGenerationOnCreationEnabled":false,"usageStatisticsEnabled":true}}` |
+| [controller.JCasC.securityRealm](./values.yaml#L522) | string | Jenkins Config as Code Security Realm-section | `"local:\n allowsSignup: false\n enableCaptcha: false\n users:\n - id: \"${chart-admin-username}\"\n name: \"Jenkins Admin\"\n password: \"${chart-admin-password}\""` |
+| [controller.additionalExistingSecrets](./values.yaml#L459) | list | List of additional existing secrets to mount | `[]` |
+| [controller.additionalPlugins](./values.yaml#L409) | list | List of plugins to install in addition to those listed in controller.installPlugins | `[]` |
+| [controller.additionalSecrets](./values.yaml#L468) | list | List of additional secrets to create and mount | `[]` |
+| [controller.admin.createSecret](./values.yaml#L91) | bool | Create secret for admin user | `true` |
+| [controller.admin.existingSecret](./values.yaml#L94) | string | The name of an existing secret containing the admin credentials | `""` |
+| [controller.admin.password](./values.yaml#L81) | string | Admin password created as a secret if `controller.admin.createSecret` is true | `<random password>` |
+| [controller.admin.passwordKey](./values.yaml#L86) | string | The key in the existing admin secret containing the password | `"jenkins-admin-password"` |
+| [controller.admin.userKey](./values.yaml#L84) | string | The key in the existing admin secret containing the username | `"jenkins-admin-user"` |
+| [controller.admin.username](./values.yaml#L78) | string | Admin username created as a secret if `controller.admin.createSecret` is true | `"admin"` |
+| [controller.affinity](./values.yaml#L638) | object | Affinity settings | `{}` |
+| [controller.agentListenerEnabled](./values.yaml#L318) | bool | Create Agent listener service | `true` |
+| [controller.agentListenerExternalTrafficPolicy](./values.yaml#L328) | string | Traffic Policy of for the agentListener service | `nil` |
+| [controller.agentListenerHostPort](./values.yaml#L322) | string | Host port to listen for agents | `nil` |
+| [controller.agentListenerLoadBalancerIP](./values.yaml#L358) | string | Static IP for the agentListener LoadBalancer | `nil` |
+| [controller.agentListenerLoadBalancerSourceRanges](./values.yaml#L330) | list | Allowed inbound IP for the agentListener service | `["0.0.0.0/0"]` |
+| [controller.agentListenerNodePort](./values.yaml#L324) | string | Node port to listen for agents | `nil` |
+| [controller.agentListenerPort](./values.yaml#L320) | int | Listening port for agents | `50000` |
+| [controller.agentListenerServiceAnnotations](./values.yaml#L353) | object | Annotations for the agentListener service | `{}` |
+| [controller.agentListenerServiceType](./values.yaml#L350) | string | Defines how to expose the agentListener service | `"ClusterIP"` |
+| [controller.backendconfig.annotations](./values.yaml#L738) | object | backendconfig annotations | `{}` |
+| [controller.backendconfig.apiVersion](./values.yaml#L732) | string | backendconfig API version | `"extensions/v1beta1"` |
+| [controller.backendconfig.enabled](./values.yaml#L730) | bool | Enables backendconfig | `false` |
+| [controller.backendconfig.labels](./values.yaml#L736) | object | backendconfig labels | `{}` |
+| [controller.backendconfig.name](./values.yaml#L734) | string | backendconfig name | `nil` |
+| [controller.backendconfig.spec](./values.yaml#L740) | object | backendconfig spec | `{}` |
+| [controller.cloudName](./values.yaml#L487) | string | Name of default cloud configuration. | `"kubernetes"` |
+| [controller.clusterIp](./values.yaml#L217) | string | k8s service clusterIP. Only used if serviceType is ClusterIP | `nil` |
+| [controller.componentName](./values.yaml#L34) | string | Used for label app.kubernetes.io/component | `"jenkins-controller"` |
+| [controller.containerEnv](./values.yaml#L150) | list | Environment variables for Jenkins Container | `[]` |
+| [controller.containerEnvFrom](./values.yaml#L147) | list | Environment variable sources for Jenkins Container | `[]` |
+| [controller.containerSecurityContext](./values.yaml#L205) | object | Allow controlling the securityContext for the jenkins container | `{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true,"runAsGroup":1000,"runAsUser":1000}` |
+| [controller.csrf.defaultCrumbIssuer.enabled](./values.yaml#L339) | bool | Enable the default CSRF Crumb issuer | `true` |
+| [controller.csrf.defaultCrumbIssuer.proxyCompatability](./values.yaml#L341) | bool | Enable proxy compatibility | `true` |
+| [controller.customInitContainers](./values.yaml#L537) | list | Custom init-container specification in raw-yaml format | `[]` |
+| [controller.customJenkinsLabels](./values.yaml#L68) | list | Append Jenkins labels to the controller | `[]` |
+| [controller.disableRememberMe](./values.yaml#L59) | bool | Disable use of remember me | `false` |
+| [controller.disabledAgentProtocols](./values.yaml#L333) | list | Disabled agent protocols | `["JNLP-connect","JNLP2-connect"]` |
+| [controller.enableRawHtmlMarkupFormatter](./values.yaml#L429) | bool | Enable HTML parsing using OWASP Markup Formatter Plugin (antisamy-markup-formatter) | `false` |
+| [controller.executorMode](./values.yaml#L65) | string | Sets the executor mode of the Jenkins node. Possible values are "NORMAL" or "EXCLUSIVE" | `"NORMAL"` |
+| [controller.existingSecret](./values.yaml#L456) | string | | `nil` |
+| [controller.extraPorts](./values.yaml#L388) | list | Optionally configure other ports to expose in the controller container | `[]` |
+| [controller.fsGroup](./values.yaml#L186) | int | Deprecated in favor of `controller.podSecurityContextOverride`. uid that will be used for persistent volume. | `1000` |
+| [controller.googlePodMonitor.enabled](./values.yaml#L801) | bool | | `false` |
+| [controller.googlePodMonitor.scrapeEndpoint](./values.yaml#L806) | string | | `"/prometheus"` |
+| [controller.googlePodMonitor.scrapeInterval](./values.yaml#L804) | string | | `"60s"` |
+| [controller.healthProbes](./values.yaml#L248) | bool | Enable Kubernetes Probes configuration configured in `controller.probes` | `true` |
+| [controller.hostAliases](./values.yaml#L754) | list | Allows for adding entries to Pod /etc/hosts | `[]` |
+| [controller.hostNetworking](./values.yaml#L70) | bool | | `false` |
+| [controller.httpsKeyStore.disableSecretMount](./values.yaml#L822) | bool | | `false` |
+| [controller.httpsKeyStore.enable](./values.yaml#L813) | bool | Enables HTTPS keystore on jenkins controller | `false` |
+| [controller.httpsKeyStore.fileName](./values.yaml#L830) | string | Jenkins keystore filename which will appear under controller.httpsKeyStore.path | `"keystore.jks"` |
+| [controller.httpsKeyStore.httpPort](./values.yaml#L826) | int | HTTP Port that Jenkins should listen to along with HTTPS, it also serves as the liveness and readiness probes port. | `8081` |
+| [controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretKey](./values.yaml#L821) | string | Name of the key in the secret that contains the JKS password | `"https-jks-password"` |
+| [controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretName](./values.yaml#L819) | string | Name of the secret that contains the JKS password, if it is not in the same secret as the JKS file | `""` |
+| [controller.httpsKeyStore.jenkinsHttpsJksSecretKey](./values.yaml#L817) | string | Name of the key in the secret that already has ssl keystore | `"jenkins-jks-file"` |
+| [controller.httpsKeyStore.jenkinsHttpsJksSecretName](./values.yaml#L815) | string | Name of the secret that already has ssl keystore | `""` |
+| [controller.httpsKeyStore.jenkinsKeyStoreBase64Encoded](./values.yaml#L835) | string | Base64 encoded Keystore content. Keystore must be converted to base64 then being pasted here | `nil` |
+| [controller.httpsKeyStore.password](./values.yaml#L832) | string | Jenkins keystore password | `"password"` |
+| [controller.httpsKeyStore.path](./values.yaml#L828) | string | Path of HTTPS keystore file | `"/var/jenkins_keystore"` |
+| [controller.image.pullPolicy](./values.yaml#L47) | string | Controller image pull policy | `"Always"` |
+| [controller.image.registry](./values.yaml#L37) | string | Controller image registry | `"docker.io"` |
+| [controller.image.repository](./values.yaml#L39) | string | Controller image repository | `"jenkins/jenkins"` |
+| [controller.image.tag](./values.yaml#L42) | string | Controller image tag override; i.e., tag: "2.440.1-jdk17" | `nil` |
+| [controller.image.tagLabel](./values.yaml#L45) | string | Controller image tag label | `"jdk17"` |
+| [controller.imagePullSecretName](./values.yaml#L49) | string | Controller image pull secret | `nil` |
+| [controller.ingress.annotations](./values.yaml#L677) | object | Ingress annotations | `{}` |
+| [controller.ingress.apiVersion](./values.yaml#L673) | string | Ingress API version | `"extensions/v1beta1"` |
+| [controller.ingress.enabled](./values.yaml#L656) | bool | Enables ingress | `false` |
+| [controller.ingress.hostName](./values.yaml#L690) | string | Ingress hostname | `nil` |
+| [controller.ingress.labels](./values.yaml#L675) | object | Ingress labels | `{}` |
+| [controller.ingress.path](./values.yaml#L686) | string | Ingress path | `nil` |
+| [controller.ingress.paths](./values.yaml#L660) | list | Override for the default Ingress paths | `[]` |
+| [controller.ingress.resourceRootUrl](./values.yaml#L692) | string | Hostname to serve assets from | `nil` |
+| [controller.ingress.tls](./values.yaml#L694) | list | Ingress TLS configuration | `[]` |
+| [controller.initConfigMap](./values.yaml#L446) | string | Name of the existing ConfigMap that contains init scripts | `nil` |
+| [controller.initContainerEnv](./values.yaml#L141) | list | Environment variables for Init Container | `[]` |
+| [controller.initContainerEnvFrom](./values.yaml#L137) | list | Environment variable sources for Init Container | `[]` |
+| [controller.initContainerResources](./values.yaml#L128) | object | Resources allocation (Requests and Limits) for Init Container | `{}` |
+| [controller.initScripts](./values.yaml#L442) | object | Map of groovy init scripts to be executed during Jenkins controller start | `{}` |
+| [controller.initializeOnce](./values.yaml#L414) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
+| [controller.installLatestPlugins](./values.yaml#L403) | bool | Download the minimum required version or latest version of all dependencies | `true` |
+| [controller.installLatestSpecifiedPlugins](./values.yaml#L406) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
+| [controller.installPlugins](./values.yaml#L395) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4203.v1dd44f5b_1cf9","workflow-aggregator:596.v8c21c963d92d","git:5.2.1","configuration-as-code:1775.v810dc950b_514"]` |
+| [controller.javaOpts](./values.yaml#L156) | string | Append to `JAVA_OPTS` env var | `nil` |
+| [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
+| [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |
+| [controller.jenkinsOpts](./values.yaml#L158) | string | Append to `JENKINS_OPTS` env var | `nil` |
+| [controller.jenkinsRef](./values.yaml#L106) | string | Custom Jenkins reference path | `"/usr/share/jenkins/ref"` |
+| [controller.jenkinsUriPrefix](./values.yaml#L173) | string | Root URI Jenkins will be served on | `nil` |
+| [controller.jenkinsUrl](./values.yaml#L168) | string | Set Jenkins URL if you are not using the ingress definitions provided by the chart | `nil` |
+| [controller.jenkinsUrlProtocol](./values.yaml#L165) | string | Set protocol for Jenkins URL; `https` if `controller.ingress.tls`, `http` otherwise | `nil` |
+| [controller.jenkinsWar](./values.yaml#L109) | string | | `"/usr/share/jenkins/jenkins.war"` |
+| [controller.jmxPort](./values.yaml#L385) | string | Open a port, for JMX stats | `nil` |
+| [controller.legacyRemotingSecurityEnabled](./values.yaml#L361) | bool | Whether legacy remoting security should be enabled | `false` |
+| [controller.lifecycle](./values.yaml#L51) | object | Lifecycle specification for controller-container | `{}` |
+| [controller.loadBalancerIP](./values.yaml#L376) | string | Optionally assign a known public LB IP | `nil` |
+| [controller.loadBalancerSourceRanges](./values.yaml#L372) | list | Allowed inbound IP addresses | `["0.0.0.0/0"]` |
+| [controller.markupFormatter](./values.yaml#L433) | string | Yaml of the markup formatter to use | `"plainText"` |
+| [controller.nodePort](./values.yaml#L223) | string | k8s node port. Only used if serviceType is NodePort | `nil` |
+| [controller.nodeSelector](./values.yaml#L625) | object | Node labels for pod assignment | `{}` |
+| [controller.numExecutors](./values.yaml#L62) | int | Set Number of executors | `0` |
+| [controller.overwritePlugins](./values.yaml#L418) | bool | Overwrite installed plugins on start | `false` |
+| [controller.overwritePluginsFromImage](./values.yaml#L422) | bool | Overwrite plugins that are already installed in the controller image | `true` |
+| [controller.podAnnotations](./values.yaml#L646) | object | Annotations for controller pod | `{}` |
+| [controller.podDisruptionBudget.annotations](./values.yaml#L312) | object | | `{}` |
+| [controller.podDisruptionBudget.apiVersion](./values.yaml#L310) | string | Policy API version | `"policy/v1beta1"` |
+| [controller.podDisruptionBudget.enabled](./values.yaml#L305) | bool | Enable Kubernetes Pod Disruption Budget configuration | `false` |
+| [controller.podDisruptionBudget.labels](./values.yaml#L313) | object | | `{}` |
+| [controller.podDisruptionBudget.maxUnavailable](./values.yaml#L315) | string | Number of pods that can be unavailable. Either an absolute number or a percentage | `"0"` |
+| [controller.podLabels](./values.yaml#L241) | object | Custom Pod labels (an object with `label-key: label-value` pairs) | `{}` |
+| [controller.podSecurityContextOverride](./values.yaml#L202) | string | Completely overwrites the contents of the pod security context, ignoring the values provided for `runAsUser`, `fsGroup`, and `securityContextCapabilities` | `nil` |
+| [controller.priorityClassName](./values.yaml#L643) | string | The name of a `priorityClass` to apply to the controller pod | `nil` |
+| [controller.probes.livenessProbe.failureThreshold](./values.yaml#L266) | int | Set the failure threshold for the liveness probe | `5` |
+| [controller.probes.livenessProbe.httpGet.path](./values.yaml#L269) | string | Set the Pod's HTTP path for the liveness probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` |
+| [controller.probes.livenessProbe.httpGet.port](./values.yaml#L271) | string | Set the Pod's HTTP port to use for the liveness probe | `"http"` |
+| [controller.probes.livenessProbe.initialDelaySeconds](./values.yaml#L280) | string | Set the initial delay for the liveness probe in seconds | `nil` |
+| [controller.probes.livenessProbe.periodSeconds](./values.yaml#L273) | int | Set the time interval between two liveness probes executions in seconds | `10` |
+| [controller.probes.livenessProbe.timeoutSeconds](./values.yaml#L275) | int | Set the timeout for the liveness probe in seconds | `5` |
+| [controller.probes.readinessProbe.failureThreshold](./values.yaml#L284) | int | Set the failure threshold for the readiness probe | `3` |
+| [controller.probes.readinessProbe.httpGet.path](./values.yaml#L287) | string | Set the Pod's HTTP path for the liveness probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` |
+| [controller.probes.readinessProbe.httpGet.port](./values.yaml#L289) | string | Set the Pod's HTTP port to use for the readiness probe | `"http"` |
+| [controller.probes.readinessProbe.initialDelaySeconds](./values.yaml#L298) | string | Set the initial delay for the readiness probe in seconds | `nil` |
+| [controller.probes.readinessProbe.periodSeconds](./values.yaml#L291) | int | Set the time interval between two readiness probes executions in seconds | `10` |
+| [controller.probes.readinessProbe.timeoutSeconds](./values.yaml#L293) | int | Set the timeout for the readiness probe in seconds | `5` |
+| [controller.probes.startupProbe.failureThreshold](./values.yaml#L253) | int | Set the failure threshold for the startup probe | `12` |
+| [controller.probes.startupProbe.httpGet.path](./values.yaml#L256) | string | Set the Pod's HTTP path for the startup probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` |
+| [controller.probes.startupProbe.httpGet.port](./values.yaml#L258) | string | Set the Pod's HTTP port to use for the startup probe | `"http"` |
+| [controller.probes.startupProbe.periodSeconds](./values.yaml#L260) | int | Set the time interval between two startup probes executions in seconds | `10` |
+| [controller.probes.startupProbe.timeoutSeconds](./values.yaml#L262) | int | Set the timeout for the startup probe in seconds | `5` |
+| [controller.projectNamingStrategy](./values.yaml#L425) | string | | `"standard"` |
+| [controller.prometheus.alertingRulesAdditionalLabels](./values.yaml#L787) | object | Additional labels to add to the PrometheusRule object | `{}` |
+| [controller.prometheus.alertingrules](./values.yaml#L785) | list | Array of prometheus alerting rules | `[]` |
+| [controller.prometheus.enabled](./values.yaml#L770) | bool | Enables prometheus service monitor | `false` |
+| [controller.prometheus.metricRelabelings](./values.yaml#L797) | list | | `[]` |
+| [controller.prometheus.prometheusRuleNamespace](./values.yaml#L789) | string | Set a custom namespace where to deploy PrometheusRule resource | `""` |
+| [controller.prometheus.relabelings](./values.yaml#L795) | list | | `[]` |
+| [controller.prometheus.scrapeEndpoint](./values.yaml#L780) | string | The endpoint prometheus should get metrics from | `"/prometheus"` |
+| [controller.prometheus.scrapeInterval](./values.yaml#L776) | string | How often prometheus should scrape metrics | `"60s"` |
+| [controller.prometheus.serviceMonitorAdditionalLabels](./values.yaml#L772) | object | Additional labels to add to the service monitor object | `{}` |
+| [controller.prometheus.serviceMonitorNamespace](./values.yaml#L774) | string | Set a custom namespace where to deploy ServiceMonitor resource | `nil` |
+| [controller.resources](./values.yaml#L115) | object | Resource allocation (Requests and Limits) | `{"limits":{"cpu":"2000m","memory":"4096Mi"},"requests":{"cpu":"50m","memory":"256Mi"}}` |
+| [controller.route.annotations](./values.yaml#L749) | object | Route annotations | `{}` |
+| [controller.route.enabled](./values.yaml#L745) | bool | Enables openshift route | `false` |
+| [controller.route.labels](./values.yaml#L747) | object | Route labels | `{}` |
+| [controller.route.path](./values.yaml#L751) | string | Route path | `nil` |
+| [controller.runAsUser](./values.yaml#L183) | int | Deprecated in favor of `controller.podSecurityContextOverride`. uid that jenkins runs with. | `1000` |
+| [controller.schedulerName](./values.yaml#L621) | string | Name of the Kubernetes scheduler to use | `""` |
+| [controller.scriptApproval](./values.yaml#L437) | list | List of groovy functions to approve | `[]` |
+| [controller.secondaryingress.annotations](./values.yaml#L712) | object | | `{}` |
+| [controller.secondaryingress.apiVersion](./values.yaml#L710) | string | | `"extensions/v1beta1"` |
+| [controller.secondaryingress.enabled](./values.yaml#L704) | bool | | `false` |
+| [controller.secondaryingress.hostName](./values.yaml#L719) | string | | `nil` |
+| [controller.secondaryingress.labels](./values.yaml#L711) | object | | `{}` |
+| [controller.secondaryingress.paths](./values.yaml#L707) | list | | `[]` |
+| [controller.secondaryingress.tls](./values.yaml#L720) | string | | `nil` |
+| [controller.secretClaims](./values.yaml#L480) | list | List of `SecretClaim` resources to create | `[]` |
+| [controller.securityContextCapabilities](./values.yaml#L192) | object | | `{}` |
+| [controller.serviceAnnotations](./values.yaml#L230) | object | Jenkins controller service annotations | `{}` |
+| [controller.serviceExternalTrafficPolicy](./values.yaml#L227) | string | | `nil` |
+| [controller.serviceLabels](./values.yaml#L236) | object | Labels for the Jenkins controller-service | `{}` |
+| [controller.servicePort](./values.yaml#L219) | int | k8s service port | `8080` |
+| [controller.serviceType](./values.yaml#L214) | string | k8s service type | `"ClusterIP"` |
+| [controller.shareProcessNamespace](./values.yaml#L124) | bool | | `false` |
+| [controller.sidecars.additionalSidecarContainers](./values.yaml#L603) | list | Configures additional sidecar container(s) for the Jenkins controller | `[]` |
+| [controller.sidecars.configAutoReload.containerSecurityContext](./values.yaml#L598) | object | Enable container security context | `{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true}` |
+| [controller.sidecars.configAutoReload.enabled](./values.yaml#L550) | bool | Enables Jenkins Config as Code auto-reload | `true` |
+| [controller.sidecars.configAutoReload.env](./values.yaml#L580) | object | Environment variables for the Jenkins Config as Code auto-reload container | `{}` |
+| [controller.sidecars.configAutoReload.envFrom](./values.yaml#L578) | list | Environment variable sources for the Jenkins Config as Code auto-reload container | `[]` |
+| [controller.sidecars.configAutoReload.folder](./values.yaml#L591) | string | | `"/var/jenkins_home/casc_configs"` |
+| [controller.sidecars.configAutoReload.image.registry](./values.yaml#L553) | string | Registry for the image that triggers the reload | `"docker.io"` |
+| [controller.sidecars.configAutoReload.image.repository](./values.yaml#L555) | string | Repository of the image that triggers the reload | `"kiwigrid/k8s-sidecar"` |
+| [controller.sidecars.configAutoReload.image.tag](./values.yaml#L557) | string | Tag for the image that triggers the reload | `"1.26.1"` |
+| [controller.sidecars.configAutoReload.imagePullPolicy](./values.yaml#L558) | string | | `"IfNotPresent"` |
+| [controller.sidecars.configAutoReload.reqRetryConnect](./values.yaml#L573) | int | How many connection-related errors to retry on | `10` |
+| [controller.sidecars.configAutoReload.resources](./values.yaml#L559) | object | | `{}` |
+| [controller.sidecars.configAutoReload.scheme](./values.yaml#L568) | string | The scheme to use when connecting to the Jenkins configuration as code endpoint | `"http"` |
+| [controller.sidecars.configAutoReload.skipTlsVerify](./values.yaml#L570) | bool | Skip TLS verification when connecting to the Jenkins configuration as code endpoint | `false` |
+| [controller.sidecars.configAutoReload.sleepTime](./values.yaml#L575) | string | How many seconds to wait before updating config-maps/secrets (sets METHOD=SLEEP on the sidecar) | `nil` |
+| [controller.sidecars.configAutoReload.sshTcpPort](./values.yaml#L589) | int | | `1044` |
+| [controller.statefulSetAnnotations](./values.yaml#L648) | object | Annotations for controller StatefulSet | `{}` |
+| [controller.statefulSetLabels](./values.yaml#L232) | object | Jenkins controller custom labels for the StatefulSet | `{}` |
+| [controller.targetPort](./values.yaml#L221) | int | k8s target port | `8080` |
+| [controller.terminationGracePeriodSeconds](./values.yaml#L631) | string | Set TerminationGracePeriodSeconds | `nil` |
+| [controller.terminationMessagePath](./values.yaml#L633) | string | Set the termination message path | `nil` |
+| [controller.terminationMessagePolicy](./values.yaml#L635) | string | Set the termination message policy | `nil` |
+| [controller.testEnabled](./values.yaml#L809) | bool | Can be used to disable rendering controller test resources when using helm template | `true` |
+| [controller.tolerations](./values.yaml#L629) | list | Toleration labels for pod assignment | `[]` |
+| [controller.updateStrategy](./values.yaml#L652) | object | Update strategy for StatefulSet | `{}` |
+| [controller.usePodSecurityContext](./values.yaml#L176) | bool | Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set) | `true` |
+| [credentialsId](./values.yaml#L27) | string | The Jenkins credentials to access the Kubernetes API server. For the default cluster it is not needed. | `nil` |
+| [fullnameOverride](./values.yaml#L13) | string | Override the full resource names | `jenkins-(release-name)` or `jenkins` if the release-name is `jenkins` |
+| [helmtest.bats.image.registry](./values.yaml#L1302) | string | Registry of the image used to test the framework | `"docker.io"` |
+| [helmtest.bats.image.repository](./values.yaml#L1304) | string | Repository of the image used to test the framework | `"bats/bats"` |
+| [helmtest.bats.image.tag](./values.yaml#L1306) | string | Tag of the image to test the framework | `"1.11.0"` |
+| [kubernetesURL](./values.yaml#L24) | string | The URL of the Kubernetes API server | `"https://kubernetes.default"` |
+| [nameOverride](./values.yaml#L10) | string | Override the resource name prefix | `Chart.Name` |
+| [namespaceOverride](./values.yaml#L16) | string | Override the deployment namespace | `Release.Namespace` |
+| [networkPolicy.apiVersion](./values.yaml#L1232) | string | NetworkPolicy ApiVersion | `"networking.k8s.io/v1"` |
+| [networkPolicy.enabled](./values.yaml#L1227) | bool | Enable the creation of NetworkPolicy resources | `false` |
+| [networkPolicy.externalAgents.except](./values.yaml#L1246) | list | A list of IP sub-ranges to be excluded from the allowlisted IP range | `[]` |
+| [networkPolicy.externalAgents.ipCIDR](./values.yaml#L1244) | string | The IP range from which external agents are allowed to connect to controller, i.e., 172.17.0.0/16 | `nil` |
+| [networkPolicy.internalAgents.allowed](./values.yaml#L1236) | bool | Allow internal agents (from the same cluster) to connect to controller. Agent pods will be filtered based on PodLabels | `true` |
+| [networkPolicy.internalAgents.namespaceLabels](./values.yaml#L1240) | object | A map of labels (keys/values) that agents namespaces must have to be able to connect to controller | `{}` |
+| [networkPolicy.internalAgents.podLabels](./values.yaml#L1238) | object | A map of labels (keys/values) that agent pods must have to be able to connect to controller | `{}` |
+| [persistence.accessMode](./values.yaml#L1202) | string | The PVC access mode | `"ReadWriteOnce"` |
+| [persistence.annotations](./values.yaml#L1198) | object | Annotations for the PVC | `{}` |
+| [persistence.dataSource](./values.yaml#L1208) | object | Existing data source to clone PVC from | `{}` |
+| [persistence.enabled](./values.yaml#L1182) | bool | Enable the use of a Jenkins PVC | `true` |
+| [persistence.existingClaim](./values.yaml#L1188) | string | Provide the name of a PVC | `nil` |
+| [persistence.labels](./values.yaml#L1200) | object | Labels for the PVC | `{}` |
+| [persistence.mounts](./values.yaml#L1220) | list | Additional mounts | `[]` |
+| [persistence.size](./values.yaml#L1204) | string | The size of the PVC | `"8Gi"` |
+| [persistence.storageClass](./values.yaml#L1196) | string | Storage class for the PVC | `nil` |
+| [persistence.subPath](./values.yaml#L1213) | string | SubPath for jenkins-home mount | `nil` |
+| [persistence.volumes](./values.yaml#L1215) | list | Additional volumes | `[]` |
+| [rbac.create](./values.yaml#L1252) | bool | Whether RBAC resources are created | `true` |
+| [rbac.readSecrets](./values.yaml#L1254) | bool | Whether the Jenkins service account should be able to read Kubernetes secrets | `false` |
+| [renderHelmLabels](./values.yaml#L30) | bool | Enables rendering of the helm.sh/chart label to the annotations | `true` |
+| [serviceAccount.annotations](./values.yaml#L1264) | object | Configures annotations for the ServiceAccount | `{}` |
+| [serviceAccount.create](./values.yaml#L1258) | bool | Configures if a ServiceAccount with this name should be created | `true` |
+| [serviceAccount.extraLabels](./values.yaml#L1266) | object | Configures extra labels for the ServiceAccount | `{}` |
+| [serviceAccount.imagePullSecretName](./values.yaml#L1268) | string | Controller ServiceAccount image pull secret | `nil` |
+| [serviceAccount.name](./values.yaml#L1262) | string | | `nil` |
+| [serviceAccountAgent.annotations](./values.yaml#L1279) | object | Configures annotations for the agent ServiceAccount | `{}` |
+| [serviceAccountAgent.create](./values.yaml#L1273) | bool | Configures if an agent ServiceAccount should be created | `false` |
+| [serviceAccountAgent.extraLabels](./values.yaml#L1281) | object | Configures extra labels for the agent ServiceAccount | `{}` |
+| [serviceAccountAgent.imagePullSecretName](./values.yaml#L1283) | string | Agent ServiceAccount image pull secret | `nil` |
+| [serviceAccountAgent.name](./values.yaml#L1277) | string | The name of the agent ServiceAccount to be used by access-controlled resources | `nil` |
diff --git a/charts/jenkins/VALUES.md.gotmpl b/charts/jenkins/VALUES.md.gotmpl
new file mode 100644
index 0000000..21080e3
--- /dev/null
+++ b/charts/jenkins/VALUES.md.gotmpl
@@ -0,0 +1,28 @@
+# Jenkins
+
+## Configuration
+
+The following table lists the configurable parameters of the Jenkins chart and their default values.
+
+{{- define "chart.valueDefaultColumnRender" -}}
+{{- $defaultValue := (trimAll "`" (default .Default .AutoDefault) | replace "\n" "") -}}
+`{{- $defaultValue | replace "\n" "" -}}`
+{{- end -}}
+
+{{- define "chart.typeColumnRender" -}}
+{{- .Type -}}
+{{- end -}}
+
+{{- define "chart.valueDescription" -}}
+{{- default .Description .AutoDescription }}
+{{- end -}}
+
+{{- define "chart.valuesTable" -}}
+| Key | Type | Description | Default |
+|:----|:-----|:---------|:------------|
+{{- range .Values }}
+| [{{ .Key }}](./values.yaml#L{{ .LineNumber }}) | {{ template "chart.typeColumnRender" . }} | {{ template "chart.valueDescription" . }} | {{ template "chart.valueDefaultColumnRender" . }} |
+{{- end }}
+{{- end }}
+
+{{ template "chart.valuesSection" . }}
diff --git a/charts/jenkins/templates/NOTES.txt b/charts/jenkins/templates/NOTES.txt
new file mode 100644
index 0000000..953dd26
--- /dev/null
+++ b/charts/jenkins/templates/NOTES.txt
@@ -0,0 +1,68 @@
+{{- $prefix := .Values.controller.jenkinsUriPrefix | default "" -}}
+{{- $url := "" -}}
+1. Get your '{{ .Values.controller.admin.username }}' user password by running:
+ kubectl exec --namespace {{ template "jenkins.namespace" . }} -it svc/{{ template "jenkins.fullname" . }} -c jenkins -- /bin/cat /run/secrets/additional/chart-admin-password && echo
+{{- if .Values.controller.ingress.hostName -}}
+{{- if .Values.controller.ingress.tls -}}
+{{- $url = print "https://" .Values.controller.ingress.hostName $prefix -}}
+{{- else -}}
+{{- $url = print "http://" .Values.controller.ingress.hostName $prefix -}}
+{{- end }}
+2. Visit {{ $url }}
+{{- else }}
+2. Get the Jenkins URL to visit by running these commands in the same shell:
+{{- if contains "NodePort" .Values.controller.serviceType }}
+ export NODE_PORT=$(kubectl get --namespace {{ template "jenkins.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "jenkins.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ template "jenkins.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+{{- if .Values.controller.httpsKeyStore.enable -}}
+{{- $url = print "https://$NODE_IP:$NODE_PORT" $prefix -}}
+{{- else -}}
+{{- $url = print "http://$NODE_IP:$NODE_PORT" $prefix -}}
+{{- end }}
+ echo {{ $url }}
+
+{{- else if contains "LoadBalancer" .Values.controller.serviceType }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of it by running 'kubectl get svc --namespace {{ template "jenkins.namespace" . }} -w {{ template "jenkins.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ template "jenkins.namespace" . }} {{ template "jenkins.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+{{- if .Values.controller.httpsKeyStore.enable -}}
+{{- $url = print "https://$SERVICE_IP:" .Values.controller.servicePort $prefix -}}
+{{- else -}}
+{{- $url = print "http://$SERVICE_IP:" .Values.controller.servicePort $prefix -}}
+{{- end }}
+ echo {{ $url }}
+
+{{- else if contains "ClusterIP" .Values.controller.serviceType -}}
+{{- if .Values.controller.httpsKeyStore.enable -}}
+{{- $url = print "https://127.0.0.1:" .Values.controller.servicePort $prefix -}}
+{{- else -}}
+{{- $url = print "http://127.0.0.1:" .Values.controller.servicePort $prefix -}}
+{{- end }}
+ echo {{ $url }}
+ kubectl --namespace {{ template "jenkins.namespace" . }} port-forward svc/{{template "jenkins.fullname" . }} {{ .Values.controller.servicePort }}:{{ .Values.controller.servicePort }}
+{{- end }}
+{{- end }}
+
+3. Login with the password from step 1 and the username: {{ .Values.controller.admin.username }}
+4. Configure security realm and authorization strategy
+5. Use Jenkins Configuration as Code by specifying configScripts in your values.yaml file, see documentation: {{ $url }}/configuration-as-code and examples: https://github.com/jenkinsci/configuration-as-code-plugin/tree/master/demos
+
+For more information on running Jenkins on Kubernetes, visit:
+https://cloud.google.com/solutions/jenkins-on-container-engine
+
+For more information about Jenkins Configuration as Code, visit:
+https://jenkins.io/projects/jcasc/
+
+{{ if and (eq .Values.controller.image.repository "jenkins/jenkins") (eq .Values.controller.image.registry "docker.io") }}
+NOTE: Consider using a custom image with pre-installed plugins
+{{- else if .Values.controller.installPlugins }}
+NOTE: Consider disabling `installPlugins` if your image already contains plugins.
+{{- end }}
+
+{{- if .Values.persistence.enabled }}
+{{- else }}
+#################################################################################
+###### WARNING: Persistence is disabled!!! You will lose your data when #####
+###### the Jenkins pod is terminated. #####
+#################################################################################
+{{- end }}
diff --git a/charts/jenkins/templates/_helpers.tpl b/charts/jenkins/templates/_helpers.tpl
new file mode 100644
index 0000000..8301a84
--- /dev/null
+++ b/charts/jenkins/templates/_helpers.tpl
@@ -0,0 +1,655 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "jenkins.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Expand the label of the chart.
+*/}}
+{{- define "jenkins.label" -}}
+{{- printf "%s-%s" (include "jenkins.name" .) .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "jenkins.namespace" -}}
+ {{- if .Values.namespaceOverride -}}
+ {{- .Values.namespaceOverride -}}
+ {{- else -}}
+ {{- .Release.Namespace -}}
+ {{- end -}}
+{{- end -}}
+
+{{- define "jenkins.agent.namespace" -}}
+ {{- if .Values.agent.namespace -}}
+ {{- tpl .Values.agent.namespace . -}}
+ {{- else -}}
+ {{- if .Values.namespaceOverride -}}
+ {{- .Values.namespaceOverride -}}
+ {{- else -}}
+ {{- .Release.Namespace -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "jenkins.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the admin password
+https://github.com/helm/charts/issues/5167#issuecomment-619137759
+*/}}
+{{- define "jenkins.password" -}}
+ {{- if .Values.controller.admin.password -}}
+ {{- .Values.controller.admin.password | b64enc | quote }}
+ {{- else -}}
+ {{- $secret := (lookup "v1" "Secret" .Release.Namespace (include "jenkins.fullname" .)).data -}}
+ {{- if $secret -}}
+ {{/*
+ Reusing current password since secret exists
+ */}}
+ {{- index $secret ( .Values.controller.admin.passwordKey | default "jenkins-admin-password" ) -}}
+ {{- else -}}
+ {{/*
+ Generate new password
+ */}}
+ {{- randAlphaNum 22 | b64enc | quote }}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Returns the Jenkins URL
+*/}}
+{{- define "jenkins.url" -}}
+{{- if .Values.controller.jenkinsUrl }}
+ {{- .Values.controller.jenkinsUrl }}
+{{- else }}
+ {{- if .Values.controller.ingress.hostName }}
+ {{- if .Values.controller.ingress.tls }}
+ {{- default "https" .Values.controller.jenkinsUrlProtocol }}://{{ tpl .Values.controller.ingress.hostName $ }}{{ default "" .Values.controller.jenkinsUriPrefix }}
+ {{- else }}
+ {{- default "http" .Values.controller.jenkinsUrlProtocol }}://{{ tpl .Values.controller.ingress.hostName $ }}{{ default "" .Values.controller.jenkinsUriPrefix }}
+ {{- end }}
+ {{- else }}
+ {{- default "http" .Values.controller.jenkinsUrlProtocol }}://{{ template "jenkins.fullname" . }}:{{.Values.controller.servicePort}}{{ default "" .Values.controller.jenkinsUriPrefix }}
+ {{- end}}
+{{- end}}
+{{- end -}}
+
+{{/*
+Returns configuration as code default config
+*/}}
+{{- define "jenkins.casc.defaults" -}}
+jenkins:
+ {{- $configScripts := toYaml .Values.controller.JCasC.configScripts }}
+ {{- if and (.Values.controller.JCasC.authorizationStrategy) (not (contains "authorizationStrategy:" $configScripts)) }}
+ authorizationStrategy:
+ {{- tpl .Values.controller.JCasC.authorizationStrategy . | nindent 4 }}
+ {{- end }}
+ {{- if and (.Values.controller.JCasC.securityRealm) (not (contains "securityRealm:" $configScripts)) }}
+ securityRealm:
+ {{- tpl .Values.controller.JCasC.securityRealm . | nindent 4 }}
+ {{- end }}
+ disableRememberMe: {{ .Values.controller.disableRememberMe }}
+ {{- if .Values.controller.legacyRemotingSecurityEnabled }}
+ remotingSecurity:
+ enabled: true
+ {{- end }}
+ mode: {{ .Values.controller.executorMode }}
+ numExecutors: {{ .Values.controller.numExecutors }}
+ {{- if not (kindIs "invalid" .Values.controller.customJenkinsLabels) }}
+ labelString: "{{ join " " .Values.controller.customJenkinsLabels }}"
+ {{- end }}
+ {{- if .Values.controller.projectNamingStrategy }}
+ {{- if kindIs "string" .Values.controller.projectNamingStrategy }}
+ projectNamingStrategy: "{{ .Values.controller.projectNamingStrategy }}"
+ {{- else }}
+ projectNamingStrategy:
+ {{- toYaml .Values.controller.projectNamingStrategy | nindent 4 }}
+ {{- end }}
+ {{- end }}
+ markupFormatter:
+ {{- if .Values.controller.enableRawHtmlMarkupFormatter }}
+ rawHtml:
+ disableSyntaxHighlighting: true
+ {{- else }}
+ {{- toYaml .Values.controller.markupFormatter | nindent 4 }}
+ {{- end }}
+ clouds:
+ - kubernetes:
+ containerCapStr: "{{ .Values.agent.containerCap }}"
+ {{- if .Values.agent.jnlpregistry }}
+ jnlpregistry: "{{ .Values.agent.jnlpregistry }}"
+ {{- end }}
+ defaultsProviderTemplate: "{{ .Values.agent.defaultsProviderTemplate }}"
+ connectTimeout: "{{ .Values.agent.kubernetesConnectTimeout }}"
+ readTimeout: "{{ .Values.agent.kubernetesReadTimeout }}"
+ {{- if .Values.agent.directConnection }}
+ directConnection: true
+ {{- else }}
+ {{- if .Values.agent.jenkinsUrl }}
+ jenkinsUrl: "{{ tpl .Values.agent.jenkinsUrl . }}"
+ {{- else }}
+ jenkinsUrl: "http://{{ template "jenkins.fullname" . }}.{{ template "jenkins.namespace" . }}.svc.{{.Values.clusterZone}}:{{.Values.controller.servicePort}}{{ default "" .Values.controller.jenkinsUriPrefix }}"
+ {{- end }}
+ {{- if not .Values.agent.websocket }}
+ {{- if .Values.agent.jenkinsTunnel }}
+ jenkinsTunnel: "{{ tpl .Values.agent.jenkinsTunnel . }}"
+ {{- else }}
+ jenkinsTunnel: "{{ template "jenkins.fullname" . }}-agent.{{ template "jenkins.namespace" . }}.svc.{{.Values.clusterZone}}:{{ .Values.controller.agentListenerPort }}"
+ {{- end }}
+ {{- else }}
+ webSocket: true
+ {{- end }}
+ {{- end }}
+ maxRequestsPerHostStr: {{ .Values.agent.maxRequestsPerHostStr | quote }}
+ retentionTimeout: {{ .Values.agent.retentionTimeout | quote }}
+ waitForPodSec: {{ .Values.agent.waitForPodSec | quote }}
+ name: "{{ .Values.controller.cloudName }}"
+ namespace: "{{ template "jenkins.agent.namespace" . }}"
+ restrictedPssSecurityContext: {{ .Values.agent.restrictedPssSecurityContext }}
+ serverUrl: "{{ .Values.kubernetesURL }}"
+ credentialsId: "{{ .Values.credentialsId }}"
+ {{- if .Values.agent.enabled }}
+ podLabels:
+ - key: "jenkins/{{ .Release.Name }}-{{ .Values.agent.componentName }}"
+ value: "true"
+ {{- range $key, $val := .Values.agent.podLabels }}
+ - key: {{ $key | quote }}
+ value: {{ $val | quote }}
+ {{- end }}
+ templates:
+ {{- if not .Values.agent.disableDefaultAgent }}
+ {{- include "jenkins.casc.podTemplate" . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.additionalAgents }}
+ {{- /* save .Values.agent */}}
+ {{- $agent := .Values.agent }}
+ {{- range $name, $additionalAgent := .Values.additionalAgents }}
+ {{- $additionalContainersEmpty := and (hasKey $additionalAgent "additionalContainers") (empty $additionalAgent.additionalContainers) }}
+ {{- /* merge original .Values.agent into additional agent to ensure it at least has the default values */}}
+ {{- $additionalAgent := merge $additionalAgent $agent }}
+ {{- /* clear list of additional containers in case it is configured empty for this agent (merge might have overwritten that) */}}
+ {{- if $additionalContainersEmpty }}
+ {{- $_ := set $additionalAgent "additionalContainers" list }}
+ {{- end }}
+ {{- /* set .Values.agent to $additionalAgent */}}
+ {{- $_ := set $.Values "agent" $additionalAgent }}
+ {{- include "jenkins.casc.podTemplate" $ | nindent 8 }}
+ {{- end }}
+ {{- /* restore .Values.agent */}}
+ {{- $_ := set .Values "agent" $agent }}
+ {{- end }}
+ {{- if .Values.agent.podTemplates }}
+ {{- range $key, $val := .Values.agent.podTemplates }}
+ {{- tpl $val $ | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.additionalClouds }}
+ {{- /* save root */}}
+ {{- $oldRoot := deepCopy $ }}
+ {{- range $name, $additionalCloud := .Values.additionalClouds }}
+ {{- $newRoot := deepCopy $ }}
+ {{- /* clear additionalAgents from the copy if override set to `true` */}}
+ {{- if .additionalAgentsOverride }}
+ {{- $_ := set $newRoot.Values "additionalAgents" list}}
+ {{- end}}
+ {{- $newValues := merge $additionalCloud $newRoot.Values }}
+ {{- $_ := set $newRoot "Values" $newValues }}
+ {{- /* clear additionalClouds from the copy */}}
+ {{- $_ := set $newRoot.Values "additionalClouds" list }}
+ {{- with $newRoot}}
+ - kubernetes:
+ containerCapStr: "{{ .Values.agent.containerCap }}"
+ {{- if .Values.agent.jnlpregistry }}
+ jnlpregistry: "{{ .Values.agent.jnlpregistry }}"
+ {{- end }}
+ defaultsProviderTemplate: "{{ .Values.agent.defaultsProviderTemplate }}"
+ connectTimeout: "{{ .Values.agent.kubernetesConnectTimeout }}"
+ readTimeout: "{{ .Values.agent.kubernetesReadTimeout }}"
+ {{- if .Values.agent.directConnection }}
+ directConnection: true
+ {{- else }}
+ {{- if .Values.agent.jenkinsUrl }}
+ jenkinsUrl: "{{ tpl .Values.agent.jenkinsUrl . }}"
+ {{- else }}
+ jenkinsUrl: "http://{{ template "jenkins.fullname" . }}.{{ template "jenkins.namespace" . }}.svc.{{.Values.clusterZone}}:{{.Values.controller.servicePort}}{{ default "" .Values.controller.jenkinsUriPrefix }}"
+ {{- end }}
+ {{- if not .Values.agent.websocket }}
+ {{- if .Values.agent.jenkinsTunnel }}
+ jenkinsTunnel: "{{ tpl .Values.agent.jenkinsTunnel . }}"
+ {{- else }}
+ jenkinsTunnel: "{{ template "jenkins.fullname" . }}-agent.{{ template "jenkins.namespace" . }}.svc.{{.Values.clusterZone}}:{{ .Values.controller.agentListenerPort }}"
+ {{- end }}
+ {{- else }}
+ webSocket: true
+ {{- end }}
+ {{- end }}
+ maxRequestsPerHostStr: {{ .Values.agent.maxRequestsPerHostStr | quote }}
+ retentionTimeout: {{ .Values.agent.retentionTimeout | quote }}
+ waitForPodSec: {{ .Values.agent.waitForPodSec | quote }}
+ name: {{ $name | quote }}
+ namespace: "{{ template "jenkins.agent.namespace" . }}"
+ restrictedPssSecurityContext: {{ .Values.agent.restrictedPssSecurityContext }}
+ serverUrl: "{{ .Values.kubernetesURL }}"
+ credentialsId: "{{ .Values.credentialsId }}"
+ {{- if .Values.agent.enabled }}
+ podLabels:
+ - key: "jenkins/{{ .Release.Name }}-{{ .Values.agent.componentName }}"
+ value: "true"
+ {{- range $key, $val := .Values.agent.podLabels }}
+ - key: {{ $key | quote }}
+ value: {{ $val | quote }}
+ {{- end }}
+ templates:
+ {{- if not .Values.agent.disableDefaultAgent }}
+ {{- include "jenkins.casc.podTemplate" . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.additionalAgents }}
+ {{- /* save .Values.agent */}}
+ {{- $agent := .Values.agent }}
+ {{- range $name, $additionalAgent := .Values.additionalAgents }}
+ {{- $additionalContainersEmpty := and (hasKey $additionalAgent "additionalContainers") (empty $additionalAgent.additionalContainers) }}
+ {{- /* merge original .Values.agent into additional agent to ensure it at least has the default values */}}
+ {{- $additionalAgent := merge $additionalAgent $agent }}
+ {{- /* clear list of additional containers in case it is configured empty for this agent (merge might have overwritten that) */}}
+ {{- if $additionalContainersEmpty }}
+ {{- $_ := set $additionalAgent "additionalContainers" list }}
+ {{- end }}
+ {{- /* set .Values.agent to $additionalAgent */}}
+ {{- $_ := set $.Values "agent" $additionalAgent }}
+ {{- include "jenkins.casc.podTemplate" $ | nindent 8 }}
+ {{- end }}
+ {{- /* restore .Values.agent */}}
+ {{- $_ := set .Values "agent" $agent }}
+ {{- end }}
+ {{- with .Values.agent.podTemplates }}
+ {{- range $key, $val := . }}
+ {{- tpl $val $ | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- /* restore root */}}
+ {{- $_ := set $ "Values" $oldRoot.Values }}
+ {{- end }}
+ {{- if .Values.controller.csrf.defaultCrumbIssuer.enabled }}
+ crumbIssuer:
+ standard:
+ excludeClientIPFromCrumb: {{ if .Values.controller.csrf.defaultCrumbIssuer.proxyCompatability }}true{{ else }}false{{- end }}
+ {{- end }}
+{{- include "jenkins.casc.security" . }}
+{{- with .Values.controller.scriptApproval }}
+ scriptApproval:
+ approvedSignatures:
+ {{- range $key, $val := . }}
+ - "{{ $val }}"
+ {{- end }}
+{{- end }}
+unclassified:
+ location:
+ {{- with .Values.controller.jenkinsAdminEmail }}
+ adminAddress: {{ . }}
+ {{- end }}
+ url: {{ template "jenkins.url" . }}
+{{- end -}}
+
+{{/*
+Returns a name template to be used for jcasc configmaps, using
+suffix passed in at call as index 0
+*/}}
+{{- define "jenkins.casc.configName" -}}
+{{- $name := index . 0 -}}
+{{- $root := index . 1 -}}
+"{{- include "jenkins.fullname" $root -}}-jenkins-{{ $name }}"
+{{- end -}}
+
+{{/*
+Returns kubernetes pod template configuration as code
+*/}}
+{{- define "jenkins.casc.podTemplate" -}}
+- name: "{{ .Values.agent.podName }}"
+ namespace: "{{ template "jenkins.agent.namespace" . }}"
+{{- if .Values.agent.annotations }}
+ annotations:
+ {{- range $key, $value := .Values.agent.annotations }}
+ - key: {{ $key }}
+ value: {{ $value | quote }}
+ {{- end }}
+{{- end }}
+ id: {{ sha256sum (toYaml .Values.agent) }}
+ containers:
+ - name: "{{ .Values.agent.sideContainerName }}"
+ alwaysPullImage: {{ .Values.agent.alwaysPullImage }}
+ args: "{{ .Values.agent.args | replace "$" "^$" }}"
+ {{- with .Values.agent.command }}
+ command: {{ . }}
+ {{- end }}
+ envVars:
+ - envVar:
+ {{- if .Values.agent.directConnection }}
+ key: "JENKINS_DIRECT_CONNECTION"
+ {{- if .Values.agent.jenkinsTunnel }}
+ value: "{{ tpl .Values.agent.jenkinsTunnel . }}"
+ {{- else }}
+ value: "{{ template "jenkins.fullname" . }}-agent.{{ template "jenkins.namespace" . }}.svc.{{.Values.clusterZone}}:{{ .Values.controller.agentListenerPort }}"
+ {{- end }}
+ {{- else }}
+ key: "JENKINS_URL"
+ {{- if .Values.agent.jenkinsUrl }}
+ value: {{ tpl .Values.agent.jenkinsUrl . }}
+ {{- else }}
+ value: "http://{{ template "jenkins.fullname" . }}.{{ template "jenkins.namespace" . }}.svc.{{.Values.clusterZone}}:{{.Values.controller.servicePort}}{{ default "/" .Values.controller.jenkinsUriPrefix }}"
+ {{- end }}
+ {{- end }}
+ image: "{{ .Values.agent.image.repository }}:{{ .Values.agent.image.tag }}"
+ {{- if .Values.agent.livenessProbe }}
+ livenessProbe:
+ execArgs: {{.Values.agent.livenessProbe.execArgs | quote}}
+ failureThreshold: {{.Values.agent.livenessProbe.failureThreshold}}
+ initialDelaySeconds: {{.Values.agent.livenessProbe.initialDelaySeconds}}
+ periodSeconds: {{.Values.agent.livenessProbe.periodSeconds}}
+ successThreshold: {{.Values.agent.livenessProbe.successThreshold}}
+ timeoutSeconds: {{.Values.agent.livenessProbe.timeoutSeconds}}
+ {{- end }}
+ privileged: "{{- if .Values.agent.privileged }}true{{- else }}false{{- end }}"
+ resourceLimitCpu: {{.Values.agent.resources.limits.cpu}}
+ resourceLimitMemory: {{.Values.agent.resources.limits.memory}}
+ {{- with .Values.agent.resources.limits.ephemeralStorage }}
+ resourceLimitEphemeralStorage: {{.}}
+ {{- end }}
+ resourceRequestCpu: {{.Values.agent.resources.requests.cpu}}
+ resourceRequestMemory: {{.Values.agent.resources.requests.memory}}
+ {{- with .Values.agent.resources.requests.ephemeralStorage }}
+ resourceRequestEphemeralStorage: {{.}}
+ {{- end }}
+ {{- with .Values.agent.runAsUser }}
+ runAsUser: {{ . }}
+ {{- end }}
+ {{- with .Values.agent.runAsGroup }}
+ runAsGroup: {{ . }}
+ {{- end }}
+ ttyEnabled: {{ .Values.agent.TTYEnabled }}
+ workingDir: {{ .Values.agent.workingDir }}
+{{- range $additionalContainers := .Values.agent.additionalContainers }}
+ - name: "{{ $additionalContainers.sideContainerName }}"
+ alwaysPullImage: {{ $additionalContainers.alwaysPullImage | default $.Values.agent.alwaysPullImage }}
+ args: "{{ $additionalContainers.args | replace "$" "^$" }}"
+ {{- with $additionalContainers.command }}
+ command: {{ . }}
+ {{- end }}
+ envVars:
+ - envVar:
+ key: "JENKINS_URL"
+ {{- if $additionalContainers.jenkinsUrl }}
+ value: {{ tpl ($additionalContainers.jenkinsUrl) . }}
+ {{- else }}
+ value: "http://{{ template "jenkins.fullname" $ }}.{{ template "jenkins.namespace" $ }}.svc.{{ $.Values.clusterZone }}:{{ $.Values.controller.servicePort }}{{ default "/" $.Values.controller.jenkinsUriPrefix }}"
+ {{- end }}
+ image: "{{ $additionalContainers.image.repository }}:{{ $additionalContainers.image.tag }}"
+ {{- if $additionalContainers.livenessProbe }}
+ livenessProbe:
+ execArgs: {{$additionalContainers.livenessProbe.execArgs | quote}}
+ failureThreshold: {{$additionalContainers.livenessProbe.failureThreshold}}
+ initialDelaySeconds: {{$additionalContainers.livenessProbe.initialDelaySeconds}}
+ periodSeconds: {{$additionalContainers.livenessProbe.periodSeconds}}
+ successThreshold: {{$additionalContainers.livenessProbe.successThreshold}}
+ timeoutSeconds: {{$additionalContainers.livenessProbe.timeoutSeconds}}
+ {{- end }}
+ privileged: "{{- if $additionalContainers.privileged }}true{{- else }}false{{- end }}"
+ resourceLimitCpu: {{ if $additionalContainers.resources }}{{ $additionalContainers.resources.limits.cpu }}{{ else }}{{ $.Values.agent.resources.limits.cpu }}{{ end }}
+ resourceLimitMemory: {{ if $additionalContainers.resources }}{{ $additionalContainers.resources.limits.memory }}{{ else }}{{ $.Values.agent.resources.limits.memory }}{{ end }}
+ resourceRequestCpu: {{ if $additionalContainers.resources }}{{ $additionalContainers.resources.requests.cpu }}{{ else }}{{ $.Values.agent.resources.requests.cpu }}{{ end }}
+ resourceRequestMemory: {{ if $additionalContainers.resources }}{{ $additionalContainers.resources.requests.memory }}{{ else }}{{ $.Values.agent.resources.requests.memory }}{{ end }}
+ {{- if or $additionalContainers.runAsUser $.Values.agent.runAsUser }}
+ runAsUser: {{ $additionalContainers.runAsUser | default $.Values.agent.runAsUser }}
+ {{- end }}
+ {{- if or $additionalContainers.runAsGroup $.Values.agent.runAsGroup }}
+ runAsGroup: {{ $additionalContainers.runAsGroup | default $.Values.agent.runAsGroup }}
+ {{- end }}
+ ttyEnabled: {{ $additionalContainers.TTYEnabled | default $.Values.agent.TTYEnabled }}
+ workingDir: {{ $additionalContainers.workingDir | default $.Values.agent.workingDir }}
+{{- end }}
+{{- if or .Values.agent.envVars .Values.agent.secretEnvVars }}
+ envVars:
+ {{- range $index, $var := .Values.agent.envVars }}
+ - envVar:
+ key: {{ $var.name }}
+ value: {{ tpl $var.value $ }}
+ {{- end }}
+ {{- range $index, $var := .Values.agent.secretEnvVars }}
+ - secretEnvVar:
+ key: {{ $var.key }}
+ secretName: {{ $var.secretName }}
+ secretKey: {{ $var.secretKey }}
+ optional: {{ $var.optional | default false }}
+ {{- end }}
+{{- end }}
+ idleMinutes: {{ .Values.agent.idleMinutes }}
+ instanceCap: 2147483647
+ {{- if .Values.agent.hostNetworking }}
+ hostNetwork: {{ .Values.agent.hostNetworking }}
+ {{- end }}
+ {{- if .Values.agent.imagePullSecretName }}
+ imagePullSecrets:
+ - name: {{ .Values.agent.imagePullSecretName }}
+ {{- end }}
+ label: "{{ .Release.Name }}-{{ .Values.agent.componentName }} {{ .Values.agent.customJenkinsLabels | join " " }}"
+{{- if .Values.agent.nodeSelector }}
+ nodeSelector:
+ {{- $local := dict "first" true }}
+ {{- range $key, $value := .Values.agent.nodeSelector }}
+ {{- if $local.first }} {{ else }},{{ end }}
+ {{- $key }}={{ tpl $value $ }}
+ {{- $_ := set $local "first" false }}
+ {{- end }}
+{{- end }}
+ nodeUsageMode: {{ quote .Values.agent.nodeUsageMode }}
+ podRetention: {{ .Values.agent.podRetention }}
+ showRawYaml: {{ .Values.agent.showRawYaml }}
+ serviceAccount: "{{ include "jenkins.serviceAccountAgentName" . }}"
+ slaveConnectTimeoutStr: "{{ .Values.agent.connectTimeout }}"
+{{- if .Values.agent.volumes }}
+ volumes:
+ {{- range $index, $volume := .Values.agent.volumes }}
+ -{{- if (eq $volume.type "ConfigMap") }} configMapVolume:
+ {{- else if (eq $volume.type "EmptyDir") }} emptyDirVolume:
+ {{- else if (eq $volume.type "EphemeralVolume") }} genericEphemeralVolume:
+ {{- else if (eq $volume.type "HostPath") }} hostPathVolume:
+ {{- else if (eq $volume.type "Nfs") }} nfsVolume:
+ {{- else if (eq $volume.type "PVC") }} persistentVolumeClaim:
+ {{- else if (eq $volume.type "Secret") }} secretVolume:
+ {{- else }} {{ $volume.type }}:
+ {{- end }}
+ {{- range $key, $value := $volume }}
+ {{- if not (eq $key "type") }}
+ {{ $key }}: {{ if kindIs "string" $value }}{{ tpl $value $ | quote }}{{ else }}{{ $value }}{{ end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- if .Values.agent.workspaceVolume }}
+ workspaceVolume:
+ {{- if (eq .Values.agent.workspaceVolume.type "DynamicPVC") }}
+ dynamicPVC:
+ {{- else if (eq .Values.agent.workspaceVolume.type "EmptyDir") }}
+ emptyDirWorkspaceVolume:
+ {{- else if (eq .Values.agent.workspaceVolume.type "EphemeralVolume") }}
+ genericEphemeralVolume:
+ {{- else if (eq .Values.agent.workspaceVolume.type "HostPath") }}
+ hostPathWorkspaceVolume:
+ {{- else if (eq .Values.agent.workspaceVolume.type "Nfs") }}
+ nfsWorkspaceVolume:
+ {{- else if (eq .Values.agent.workspaceVolume.type "PVC") }}
+ persistentVolumeClaimWorkspaceVolume:
+ {{- else }}
+ {{ .Values.agent.workspaceVolume.type }}:
+ {{- end }}
+ {{- range $key, $value := .Values.agent.workspaceVolume }}
+ {{- if not (eq $key "type") }}
+ {{ $key }}: {{ if kindIs "string" $value }}{{ tpl $value $ | quote }}{{ else }}{{ $value }}{{ end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- if .Values.agent.yamlTemplate }}
+ yaml: |-
+ {{- tpl (trim .Values.agent.yamlTemplate) . | nindent 4 }}
+{{- end }}
+ yamlMergeStrategy: {{ .Values.agent.yamlMergeStrategy }}
+{{- end -}}
+
+{{- define "jenkins.kubernetes-version" -}}
+ {{- if .Values.controller.installPlugins -}}
+ {{- range .Values.controller.installPlugins -}}
+ {{- if hasPrefix "kubernetes:" . }}
+ {{- $split := splitList ":" . }}
+ {{- printf "%s" (index $split 1 ) -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{- define "jenkins.casc.security" }}
+security:
+{{- with .Values.controller.JCasC }}
+{{- if .security }}
+ {{- .security | toYaml | nindent 2 }}
+{{- end }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "jenkins.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "jenkins.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account for Jenkins agents to use
+*/}}
+{{- define "jenkins.serviceAccountAgentName" -}}
+{{- if .Values.serviceAccountAgent.create -}}
+ {{ default (printf "%s-%s" (include "jenkins.fullname" .) "agent") .Values.serviceAccountAgent.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccountAgent.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a full tag name for controller image
+*/}}
+{{- define "controller.image.tag" -}}
+{{- if .Values.controller.image.tagLabel -}}
+ {{- default (printf "%s-%s" .Chart.AppVersion .Values.controller.image.tagLabel) .Values.controller.image.tag -}}
+{{- else -}}
+ {{- default .Chart.AppVersion .Values.controller.image.tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the HTTP port for interacting with the controller
+*/}}
+{{- define "controller.httpPort" -}}
+{{- if .Values.controller.httpsKeyStore.enable -}}
+ {{- .Values.controller.httpsKeyStore.httpPort -}}
+{{- else -}}
+ {{- .Values.controller.targetPort -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "jenkins.configReloadContainer" -}}
+{{- $root := index . 0 -}}
+{{- $containerName := index . 1 -}}
+{{- $containerType := index . 2 -}}
+- name: {{ $containerName }}
+ image: "{{ $root.Values.controller.sidecars.configAutoReload.image.registry }}/{{ $root.Values.controller.sidecars.configAutoReload.image.repository }}:{{ $root.Values.controller.sidecars.configAutoReload.image.tag }}"
+ imagePullPolicy: {{ $root.Values.controller.sidecars.configAutoReload.imagePullPolicy }}
+ {{- if $root.Values.controller.sidecars.configAutoReload.containerSecurityContext }}
+ securityContext: {{- toYaml $root.Values.controller.sidecars.configAutoReload.containerSecurityContext | nindent 4 }}
+ {{- end }}
+ {{- if $root.Values.controller.sidecars.configAutoReload.envFrom }}
+ envFrom:
+{{ (tpl (toYaml $root.Values.controller.sidecars.configAutoReload.envFrom) $root) | indent 4 }}
+ {{- end }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: LABEL
+ value: "{{ template "jenkins.fullname" $root }}-jenkins-config"
+ - name: FOLDER
+ value: "{{ $root.Values.controller.sidecars.configAutoReload.folder }}"
+ - name: NAMESPACE
+ value: '{{ $root.Values.controller.sidecars.configAutoReload.searchNamespace | default (include "jenkins.namespace" $root) }}'
+ {{- if eq $containerType "init" }}
+ - name: METHOD
+ value: "LIST"
+ {{- else if $root.Values.controller.sidecars.configAutoReload.sleepTime }}
+ - name: METHOD
+ value: "SLEEP"
+ - name: SLEEP_TIME
+ value: "{{ $root.Values.controller.sidecars.configAutoReload.sleepTime }}"
+ {{- end }}
+ {{- if eq $containerType "sidecar" }}
+ - name: REQ_URL
+ value: "{{- default "http" $root.Values.controller.sidecars.configAutoReload.scheme }}://localhost:{{- include "controller.httpPort" $root -}}{{- $root.Values.controller.jenkinsUriPrefix -}}/reload-configuration-as-code/?casc-reload-token=$(POD_NAME)"
+ - name: REQ_METHOD
+ value: "POST"
+ - name: REQ_RETRY_CONNECT
+ value: "{{ $root.Values.controller.sidecars.configAutoReload.reqRetryConnect }}"
+ {{- if $root.Values.controller.sidecars.configAutoReload.skipTlsVerify }}
+ - name: REQ_SKIP_TLS_VERIFY
+ value: "true"
+ {{- end }}
+ {{- end }}
+
+ {{- if $root.Values.controller.sidecars.configAutoReload.env }}
+ {{- range $envVarItem := $root.Values.controller.sidecars.configAutoReload.env -}}
+ {{- if or (ne $containerType "init") (ne .name "METHOD") }}
+{{- (tpl (toYaml (list $envVarItem)) $root) | nindent 4 }}
+ {{- end -}}
+ {{- end -}}
+ {{- end }}
+
+ resources:
+{{ toYaml $root.Values.controller.sidecars.configAutoReload.resources | indent 4 }}
+ volumeMounts:
+ - name: sc-config-volume
+ mountPath: {{ $root.Values.controller.sidecars.configAutoReload.folder | quote }}
+ - name: jenkins-home
+ mountPath: {{ $root.Values.controller.jenkinsHome }}
+ {{- if $root.Values.persistence.subPath }}
+ subPath: {{ $root.Values.persistence.subPath }}
+ {{- end }}
+
+{{- end -}}
diff --git a/charts/jenkins/templates/config-init-scripts.yaml b/charts/jenkins/templates/config-init-scripts.yaml
new file mode 100644
index 0000000..7dd253c
--- /dev/null
+++ b/charts/jenkins/templates/config-init-scripts.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.controller.initScripts -}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "jenkins.fullname" . }}-init-scripts
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+data:
+{{- range $key, $val := .Values.controller.initScripts }}
+ init{{ $key }}.groovy: |-
+{{ tpl $val $ | indent 4 }}
+{{- end }}
+{{- end }}
diff --git a/charts/jenkins/templates/config.yaml b/charts/jenkins/templates/config.yaml
new file mode 100644
index 0000000..5de0b9f
--- /dev/null
+++ b/charts/jenkins/templates/config.yaml
@@ -0,0 +1,92 @@
+{{- $jenkinsHome := .Values.controller.jenkinsHome -}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "jenkins.fullname" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+data:
+ apply_config.sh: |-
+ set -e
+{{- if .Values.controller.initializeOnce }}
+ if [ -f {{ .Values.controller.jenkinsHome }}/initialization-completed ]; then
+ echo "controller was previously initialized, refusing to re-initialize"
+ exit 0
+ fi
+{{- end }}
+ echo "disable Setup Wizard"
+ # Prevent Setup Wizard when JCasC is enabled
+ echo $JENKINS_VERSION > {{ .Values.controller.jenkinsHome }}/jenkins.install.UpgradeWizard.state
+ echo $JENKINS_VERSION > {{ .Values.controller.jenkinsHome }}/jenkins.install.InstallUtil.lastExecVersion
+{{- if .Values.controller.overwritePlugins }}
+ echo "remove all plugins from shared volume"
+ # remove all plugins from shared volume
+ rm -rf {{ .Values.controller.jenkinsHome }}/plugins/*
+{{- end }}
+{{- if .Values.controller.JCasC.overwriteConfiguration }}
+ echo "deleting all XML config files"
+ rm -f {{ .Values.controller.jenkinsHome }}/config.xml
+ rm -f {{ .Values.controller.jenkinsHome }}/*plugins*.xml
+ find {{ .Values.controller.jenkinsHome }} -maxdepth 1 -type f -iname '*configuration*.xml' -exec rm -f {} \;
+{{- end }}
+{{- if .Values.controller.installPlugins }}
+ echo "download plugins"
+ # Install missing plugins
+ cp /var/jenkins_config/plugins.txt {{ .Values.controller.jenkinsHome }};
+ rm -rf {{ .Values.controller.jenkinsRef }}/plugins/*.lock
+ version () { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
+ if [ -f "{{ .Values.controller.jenkinsWar }}" ] && [ -n "$(command -v jenkins-plugin-cli)" 2>/dev/null ] && [ $(version $(jenkins-plugin-cli --version)) -ge $(version "2.1.1") ]; then
+ jenkins-plugin-cli --verbose --war "{{ .Values.controller.jenkinsWar }}" --plugin-file "{{ .Values.controller.jenkinsHome }}/plugins.txt" --latest {{ .Values.controller.installLatestPlugins }}{{- if .Values.controller.installLatestSpecifiedPlugins }} --latest-specified{{- end }};
+ else
+ /usr/local/bin/install-plugins.sh `echo $(cat {{ .Values.controller.jenkinsHome }}/plugins.txt)`;
+ fi
+ echo "copy plugins to shared volume"
+ # Copy plugins to shared volume
+ yes n | cp -i {{ .Values.controller.jenkinsRef }}/plugins/* /var/jenkins_plugins/;
+{{- end }}
+ {{- if not .Values.controller.sidecars.configAutoReload.enabled }}
+ echo "copy configuration as code files"
+ mkdir -p {{ .Values.controller.jenkinsHome }}/casc_configs;
+ rm -rf {{ .Values.controller.jenkinsHome }}/casc_configs/*
+ {{- if or .Values.controller.JCasC.defaultConfig .Values.controller.JCasC.configScripts }}
+ cp -v /var/jenkins_config/*.yaml {{ .Values.controller.jenkinsHome }}/casc_configs
+ {{- end }}
+ {{- end }}
+ echo "finished initialization"
+{{- if .Values.controller.initializeOnce }}
+ touch {{ .Values.controller.jenkinsHome }}/initialization-completed
+{{- end }}
+ {{- if not .Values.controller.sidecars.configAutoReload.enabled }}
+# Only add config to this script if we aren't auto-reloading otherwise the pod will restart upon each config change:
+{{- if .Values.controller.JCasC.defaultConfig }}
+ jcasc-default-config.yaml: |-
+ {{- include "jenkins.casc.defaults" . |nindent 4}}
+{{- end }}
+{{- range $key, $val := .Values.controller.JCasC.configScripts }}
+ {{ $key }}.yaml: |-
+{{ tpl $val $| indent 4 }}
+{{- end }}
+{{- end }}
+ plugins.txt: |-
+{{- if .Values.controller.installPlugins }}
+ {{- range $installPlugin := .Values.controller.installPlugins }}
+ {{- $installPlugin | nindent 4 }}
+ {{- end }}
+ {{- range $addlPlugin := .Values.controller.additionalPlugins }}
+ {{- /* duplicate plugin check */}}
+ {{- range $installPlugin := $.Values.controller.installPlugins }}
+ {{- if eq (splitList ":" $addlPlugin | first) (splitList ":" $installPlugin | first) }}
+ {{- $message := print "[PLUGIN CONFLICT] controller.additionalPlugins contains '" $addlPlugin "'" }}
+ {{- $message := print $message " but controller.installPlugins already contains '" $installPlugin "'." }}
+ {{- $message := print $message " Override controller.installPlugins to use '" $addlPlugin "' plugin." }}
+ {{- fail $message }}
+ {{- end }}
+ {{- end }}
+ {{- $addlPlugin | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/jenkins/templates/deprecation.yaml b/charts/jenkins/templates/deprecation.yaml
new file mode 100644
index 0000000..f54017c
--- /dev/null
+++ b/charts/jenkins/templates/deprecation.yaml
@@ -0,0 +1,151 @@
+{{- if .Values.checkDeprecation }}
+ {{- if .Values.master }}
+ {{ fail "`master` does no longer exist. It has been renamed to `controller`" }}
+ {{- end }}
+
+ {{- if .Values.controller.imageTag }}
+ {{ fail "`controller.imageTag` does no longer exist. Please use `controller.image.tag` instead" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveListenerPort }}
+ {{ fail "`controller.slaveListenerPort` does no longer exist. It has been renamed to `controller.agentListenerPort`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveHostPort }}
+ {{ fail "`controller.slaveHostPort` does no longer exist. It has been renamed to `controller.agentListenerHostPort`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveKubernetesNamespace }}
+ {{ fail "`controller.slaveKubernetesNamespace` does no longer exist. It has been renamed to `agent.namespace`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveDefaultsProviderTemplate }}
+ {{ fail "`controller.slaveDefaultsProviderTemplate` does no longer exist. It has been renamed to `agent.defaultsProviderTemplate`" }}
+ {{- end }}
+
+ {{- if .Values.controller.useSecurity }}
+ {{ fail "`controller.useSecurity` does no longer exist. It has been renamed to `controller.adminSecret`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveJenkinsUrl }}
+ {{ fail "`controller.slaveJenkinsUrl` does no longer exist. It has been renamed to `agent.jenkinsUrl`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveJenkinsTunnel }}
+ {{ fail "`controller.slaveJenkinsTunnel` does no longer exist. It has been renamed to `agent.jenkinsTunnel`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveConnectTimeout }}
+ {{ fail "`controller.slaveConnectTimeout` does no longer exist. It has been renamed to `agent.kubernetesConnectTimeout`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveReadTimeout }}
+ {{ fail "`controller.slaveReadTimeout` does no longer exist. It has been renamed to `agent.kubernetesReadTimeout`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveListenerServiceType }}
+ {{ fail "`controller.slaveListenerServiceType` does no longer exist. It has been renamed to `controller.agentListenerServiceType`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveListenerLoadBalancerIP }}
+ {{ fail "`controller.slaveListenerLoadBalancerIP` does no longer exist. It has been renamed to `controller.agentListenerLoadBalancerIP`" }}
+ {{- end }}
+
+ {{- if .Values.controller.slaveListenerServiceAnnotations }}
+ {{ fail "`controller.slaveListenerServiceAnnotations` does no longer exist. It has been renamed to `controller.agentListenerServiceAnnotations`" }}
+ {{- end }}
+
+ {{- if .Values.agent.slaveConnectTimeout }}
+ {{ fail "`agent.slaveConnectTimeout` does no longer exist. It has been renamed to `agent.connectTimeout`" }}
+ {{- end }}
+
+ {{- if .Values.NetworkPolicy }}
+
+ {{- if .Values.NetworkPolicy.Enabled }}
+ {{ fail "`NetworkPolicy.Enabled` does no longer exist. It has been renamed to `networkPolicy.enabled`" }}
+ {{- end }}
+
+ {{- if .Values.NetworkPolicy.ApiVersion }}
+ {{ fail "`NetworkPolicy.ApiVersion` does no longer exist. It has been renamed to `networkPolicy.apiVersion`" }}
+ {{- end }}
+
+ {{ fail "NetworkPolicy.* values have been renamed, please check the documentation" }}
+ {{- end }}
+
+
+ {{- if .Values.rbac.install }}
+ {{ fail "`rbac.install` does no longer exist. It has been renamed to `rbac.create` and is enabled by default!" }}
+ {{- end }}
+
+ {{- if .Values.rbac.serviceAccountName }}
+ {{ fail "`rbac.serviceAccountName` does no longer exist. It has been renamed to `serviceAccount.name`" }}
+ {{- end }}
+
+ {{- if .Values.rbac.serviceAccountAnnotations }}
+ {{ fail "`rbac.serviceAccountAnnotations` does no longer exist. It has been renamed to `serviceAccount.annotations`" }}
+ {{- end }}
+
+ {{- if .Values.rbac.roleRef }}
+ {{ fail "`rbac.roleRef` does no longer exist. RBAC roles are now generated, please check the documentation" }}
+ {{- end }}
+
+ {{- if .Values.rbac.roleKind }}
+ {{ fail "`rbac.roleKind` does no longer exist. RBAC roles are now generated, please check the documentation" }}
+ {{- end }}
+
+ {{- if .Values.rbac.roleBindingKind }}
+ {{ fail "`rbac.roleBindingKind` does no longer exist. RBAC roles are now generated, please check the documentation" }}
+ {{- end }}
+
+ {{- if .Values.controller.JCasC.pluginVersion }}
+ {{ fail "controller.JCasC.pluginVersion has been deprecated, please use controller.installPlugins instead" }}
+ {{- end }}
+
+ {{- if .Values.controller.deploymentLabels }}
+ {{ fail "`controller.deploymentLabels` does no longer exist. It has been renamed to `controller.statefulSetLabels`" }}
+ {{- end }}
+
+ {{- if .Values.controller.deploymentAnnotations }}
+ {{ fail "`controller.deploymentAnnotations` does no longer exist. It has been renamed to `controller.statefulSetAnnotations`" }}
+ {{- end }}
+
+ {{- if .Values.controller.rollingUpdate }}
+ {{ fail "`controller.rollingUpdate` does no longer exist. It is no longer relevant, since a StatefulSet is used for the Jenkins controller" }}
+ {{- end }}
+
+ {{- if .Values.controller.tag }}
+ {{ fail "`controller.tag` no longer exists. It has been renamed to `controller.image.tag'" }}
+ {{- end }}
+
+ {{- if .Values.controller.tagLabel }}
+ {{ fail "`controller.tagLabel` no longer exists. It has been renamed to `controller.image.tagLabel`" }}
+ {{- end }}
+
+ {{- if .Values.controller.adminSecret }}
+ {{ fail "`controller.adminSecret` no longer exists. It has been renamed to `controller.admin.createSecret`" }}
+ {{- end }}
+
+ {{- if .Values.controller.adminUser }}
+ {{ fail "`controller.adminUser` no longer exists. It has been renamed to `controller.admin.username`" }}
+ {{- end }}
+
+ {{- if .Values.controller.adminPassword }}
+ {{ fail "`controller.adminPassword` no longer exists. It has been renamed to `controller.admin.password`" }}
+ {{- end }}
+
+ {{- if .Values.controller.sidecars.other }}
+ {{ fail "`controller.sidecars.other` no longer exists. It has been renamed to `controller.sidecars.additionalSidecarContainers`" }}
+ {{- end }}
+
+ {{- if .Values.agent.tag }}
+    {{ fail "`agent.tag` no longer exists. It has been renamed to `agent.image.tag`" }}
+ {{- end }}
+
+ {{- if .Values.backup }}
+    {{ fail "`backup` no longer exists." }}
+ {{- end }}
+
+ {{- if .Values.helmtest.bats.tag }}
+ {{ fail "`helmtest.bats.tag` no longer exists. It has been renamed to `helmtest.bats.image.tag`" }}
+ {{- end }}
+{{- end }}
diff --git a/charts/jenkins/templates/home-pvc.yaml b/charts/jenkins/templates/home-pvc.yaml
new file mode 100644
index 0000000..f417d23
--- /dev/null
+++ b/charts/jenkins/templates/home-pvc.yaml
@@ -0,0 +1,41 @@
+{{- if not (contains "jenkins-home" (quote .Values.persistence.volumes)) }}
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+ name: {{ template "jenkins.fullname" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+{{- if .Values.persistence.labels }}
+{{ toYaml .Values.persistence.labels | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.persistence.dataSource }}
+ dataSource:
+{{ toYaml .Values.persistence.dataSource | indent 4 }}
+{{- end }}
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/jenkins/templates/jcasc-config.yaml b/charts/jenkins/templates/jcasc-config.yaml
new file mode 100644
index 0000000..e404194
--- /dev/null
+++ b/charts/jenkins/templates/jcasc-config.yaml
@@ -0,0 +1,45 @@
+{{- $root := . }}
+{{- if .Values.controller.sidecars.configAutoReload.enabled }}
+{{- range $key, $val := .Values.controller.JCasC.configScripts }}
+{{- if $val }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "jenkins.casc.configName" (list (printf "config-%s" $key) $ )}}
+ namespace: {{ template "jenkins.namespace" $root }}
+ labels:
+ "app.kubernetes.io/name": {{ template "jenkins.name" $root}}
+ {{- if $root.Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ $.Release.Service }}"
+ "app.kubernetes.io/instance": "{{ $.Release.Name }}"
+ "app.kubernetes.io/component": "{{ $.Values.controller.componentName }}"
+ {{ template "jenkins.fullname" $root }}-jenkins-config: "true"
+data:
+ {{ $key }}.yaml: |-
+{{ tpl $val $| indent 4 }}
+{{- end }}
+{{- end }}
+{{- if .Values.controller.JCasC.defaultConfig }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "jenkins.casc.configName" (list "jcasc-config" $ )}}
+ namespace: {{ template "jenkins.namespace" $root }}
+ labels:
+ "app.kubernetes.io/name": {{ template "jenkins.name" $root}}
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ $.Release.Service }}"
+ "app.kubernetes.io/instance": "{{ $.Release.Name }}"
+ "app.kubernetes.io/component": "{{ $.Values.controller.componentName }}"
+ {{ template "jenkins.fullname" $root }}-jenkins-config: "true"
+data:
+ jcasc-default-config.yaml: |-
+ {{- include "jenkins.casc.defaults" . | nindent 4 }}
+{{- end}}
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-agent-svc.yaml b/charts/jenkins/templates/jenkins-agent-svc.yaml
new file mode 100644
index 0000000..4440b91
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-agent-svc.yaml
@@ -0,0 +1,43 @@
+{{- if .Values.controller.agentListenerEnabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "jenkins.fullname" . }}-agent
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- if .Values.controller.agentListenerServiceAnnotations }}
+ annotations:
+ {{- toYaml .Values.controller.agentListenerServiceAnnotations | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.controller.agentListenerExternalTrafficPolicy }}
+ externalTrafficPolicy: {{.Values.controller.agentListenerExternalTrafficPolicy}}
+ {{- end }}
+ ports:
+ - port: {{ .Values.controller.agentListenerPort }}
+ targetPort: {{ .Values.controller.agentListenerPort }}
+ {{- if (and (eq .Values.controller.agentListenerServiceType "NodePort") (not (empty .Values.controller.agentListenerNodePort))) }}
+ nodePort: {{ .Values.controller.agentListenerNodePort }}
+ {{- end }}
+ name: agent-listener
+ selector:
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ type: {{ .Values.controller.agentListenerServiceType }}
+ {{if eq .Values.controller.agentListenerServiceType "LoadBalancer"}}
+{{- if .Values.controller.agentListenerLoadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+{{ toYaml .Values.controller.agentListenerLoadBalancerSourceRanges | indent 4 }}
+{{- end }}
+ {{- end }}
+ {{- if and (eq .Values.controller.agentListenerServiceType "LoadBalancer") (.Values.controller.agentListenerLoadBalancerIP) }}
+ loadBalancerIP: {{ .Values.controller.agentListenerLoadBalancerIP }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/jenkins/templates/jenkins-aws-security-group-policies.yaml b/charts/jenkins/templates/jenkins-aws-security-group-policies.yaml
new file mode 100644
index 0000000..2f6e7a1
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-aws-security-group-policies.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.awsSecurityGroupPolicies.enabled -}}
+{{- range .Values.awsSecurityGroupPolicies.policies -}}
+apiVersion: vpcresources.k8s.aws/v1beta1
+kind: SecurityGroupPolicy
+metadata:
+ name: {{ .name }}
+ namespace: {{ template "jenkins.namespace" $ }}
+spec:
+ podSelector:
+ {{- toYaml .podSelector | nindent 6}}
+ securityGroups:
+ groupIds:
+ {{- toYaml .securityGroupIds | nindent 6}}
+---
+{{- end -}}
+{{- end -}}
diff --git a/charts/jenkins/templates/jenkins-controller-alerting-rules.yaml b/charts/jenkins/templates/jenkins-controller-alerting-rules.yaml
new file mode 100644
index 0000000..3fd8061
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-alerting-rules.yaml
@@ -0,0 +1,26 @@
+{{- if and .Values.controller.prometheus.enabled .Values.controller.prometheus.alertingrules }}
+---
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: {{ template "jenkins.fullname" . }}
+{{- if .Values.controller.prometheus.prometheusRuleNamespace }}
+ namespace: {{ .Values.controller.prometheus.prometheusRuleNamespace }}
+{{- else }}
+ namespace: {{ template "jenkins.namespace" . }}
+{{- end }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- range $key, $val := .Values.controller.prometheus.alertingRulesAdditionalLabels }}
+ {{ $key }}: {{ $val | quote }}
+ {{- end}}
+spec:
+ groups:
+{{ toYaml .Values.controller.prometheus.alertingrules | indent 2 }}
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-backendconfig.yaml b/charts/jenkins/templates/jenkins-controller-backendconfig.yaml
new file mode 100644
index 0000000..0e8a566
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-backendconfig.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.controller.backendconfig.enabled }}
+apiVersion: {{ .Values.controller.backendconfig.apiVersion }}
+kind: BackendConfig
+metadata:
+ name: {{ .Values.controller.backendconfig.name }}
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+{{- if .Values.controller.backendconfig.labels }}
+{{ toYaml .Values.controller.backendconfig.labels | indent 4 }}
+{{- end }}
+{{- if .Values.controller.backendconfig.annotations }}
+ annotations:
+{{ toYaml .Values.controller.backendconfig.annotations | indent 4 }}
+{{- end }}
+spec:
+{{ toYaml .Values.controller.backendconfig.spec | indent 2 }}
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-ingress.yaml b/charts/jenkins/templates/jenkins-controller-ingress.yaml
new file mode 100644
index 0000000..b3b344f
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-ingress.yaml
@@ -0,0 +1,77 @@
+{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
+{{- if .Values.controller.ingress.enabled }}
+{{- if semverCompare ">=1.19-0" $kubeTargetVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" $kubeTargetVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: {{ .Values.controller.ingress.apiVersion }}
+{{- end }}
+kind: Ingress
+metadata:
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+{{- if .Values.controller.ingress.labels }}
+{{ toYaml .Values.controller.ingress.labels | indent 4 }}
+{{- end }}
+{{- if .Values.controller.ingress.annotations }}
+ annotations:
+{{ toYaml .Values.controller.ingress.annotations | indent 4 }}
+{{- end }}
+ name: {{ template "jenkins.fullname" . }}
+spec:
+{{- if .Values.controller.ingress.ingressClassName }}
+ ingressClassName: {{ .Values.controller.ingress.ingressClassName | quote }}
+{{- end }}
+ rules:
+ - http:
+ paths:
+{{- if empty (.Values.controller.ingress.paths) }}
+ - backend:
+{{- if semverCompare ">=1.19-0" $kubeTargetVersion }}
+ service:
+ name: {{ template "jenkins.fullname" . }}
+ port:
+ number: {{ .Values.controller.servicePort }}
+ pathType: ImplementationSpecific
+{{- else }}
+ serviceName: {{ template "jenkins.fullname" . }}
+ servicePort: {{ .Values.controller.servicePort }}
+{{- end }}
+{{- if .Values.controller.ingress.path }}
+ path: {{ .Values.controller.ingress.path }}
+{{- end -}}
+{{- else }}
+{{ tpl (toYaml .Values.controller.ingress.paths | indent 6) . }}
+{{- end -}}
+{{- if .Values.controller.ingress.hostName }}
+ host: {{ tpl .Values.controller.ingress.hostName . | quote }}
+{{- end }}
+{{- if .Values.controller.ingress.resourceRootUrl }}
+ - http:
+ paths:
+ - backend:
+{{- if semverCompare ">=1.19-0" $kubeTargetVersion }}
+ service:
+ name: {{ template "jenkins.fullname" . }}
+ port:
+ number: {{ .Values.controller.servicePort }}
+ pathType: ImplementationSpecific
+{{- else }}
+ serviceName: {{ template "jenkins.fullname" . }}
+ servicePort: {{ .Values.controller.servicePort }}
+{{- end }}
+ host: {{ tpl .Values.controller.ingress.resourceRootUrl . | quote }}
+{{- end }}
+{{- if .Values.controller.ingress.tls }}
+ tls:
+{{ tpl (toYaml .Values.controller.ingress.tls ) . | indent 4 }}
+{{- end -}}
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-networkpolicy.yaml b/charts/jenkins/templates/jenkins-controller-networkpolicy.yaml
new file mode 100644
index 0000000..82835f2
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-networkpolicy.yaml
@@ -0,0 +1,76 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ .Values.networkPolicy.apiVersion }}
+metadata:
+ name: "{{ .Release.Name }}-{{ .Values.controller.componentName }}"
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+spec:
+ podSelector:
+ matchLabels:
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ ingress:
+ # Allow web access to the UI
+ - ports:
+ - port: {{ .Values.controller.targetPort }}
+ {{- if .Values.controller.agentListenerEnabled }}
+ # Allow inbound connections from agents
+ - from:
+ {{- if .Values.networkPolicy.internalAgents.allowed }}
+ - podSelector:
+ matchLabels:
+ "jenkins/{{ .Release.Name }}-{{ .Values.agent.componentName }}": "true"
+ {{- range $k,$v:= .Values.networkPolicy.internalAgents.podLabels }}
+ {{ $k }}: {{ $v }}
+ {{- end }}
+ {{- if .Values.networkPolicy.internalAgents.namespaceLabels }}
+ namespaceSelector:
+ matchLabels:
+ {{- range $k,$v:= .Values.networkPolicy.internalAgents.namespaceLabels }}
+ {{ $k }}: {{ $v }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.networkPolicy.externalAgents.ipCIDR .Values.networkPolicy.externalAgents.except }}
+ - ipBlock:
+ cidr: {{ required "ipCIDR is required if you wish to allow external agents to connect to Jenkins Controller." .Values.networkPolicy.externalAgents.ipCIDR }}
+ {{- if .Values.networkPolicy.externalAgents.except }}
+ except:
+ {{- range .Values.networkPolicy.externalAgents.except }}
+ - {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.controller.agentListenerPort }}
+ {{- end }}
+{{- if .Values.agent.enabled }}
+---
+kind: NetworkPolicy
+apiVersion: {{ .Values.networkPolicy.apiVersion }}
+metadata:
+ name: "{{ .Release.Name }}-{{ .Values.agent.componentName }}"
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+spec:
+ podSelector:
+ matchLabels:
+ # DefaultDeny
+ "jenkins/{{ .Release.Name }}-{{ .Values.agent.componentName }}": "true"
+{{- end }}
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-pdb.yaml b/charts/jenkins/templates/jenkins-controller-pdb.yaml
new file mode 100644
index 0000000..9dc1faf
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-pdb.yaml
@@ -0,0 +1,34 @@
+{{- if .Values.controller.podDisruptionBudget.enabled }}
+{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
+{{- if semverCompare ">=1.21-0" $kubeTargetVersion -}}
+apiVersion: policy/v1
+{{- else if semverCompare ">=1.5-0" $kubeTargetVersion -}}
+apiVersion: policy/v1beta1
+{{- else -}}
+apiVersion: {{ .Values.controller.podDisruptionBudget.apiVersion }}
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "jenkins.fullname" . }}-pdb
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- if .Values.controller.podDisruptionBudget.labels -}}
+ {{ toYaml .Values.controller.podDisruptionBudget.labels | nindent 4 }}
+ {{- end }}
+ {{- if .Values.controller.podDisruptionBudget.annotations }}
+ annotations: {{ toYaml .Values.controller.podDisruptionBudget.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
+ selector:
+ matchLabels:
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-podmonitor.yaml b/charts/jenkins/templates/jenkins-controller-podmonitor.yaml
new file mode 100644
index 0000000..9a04019
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-podmonitor.yaml
@@ -0,0 +1,30 @@
+{{- if .Values.controller.googlePodMonitor.enabled }}
+apiVersion: monitoring.googleapis.com/v1
+kind: PodMonitoring
+
+metadata:
+ name: {{ template "jenkins.fullname" . }}
+{{- if .Values.controller.googlePodMonitor.serviceMonitorNamespace }}
+ namespace: {{ .Values.controller.googlePodMonitor.serviceMonitorNamespace }}
+{{- else }}
+ namespace: {{ template "jenkins.namespace" . }}
+{{- end }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+
+spec:
+ endpoints:
+ - interval: {{ .Values.controller.googlePodMonitor.scrapeInterval }}
+ port: http
+ path: {{ .Values.controller.jenkinsUriPrefix }}{{ .Values.controller.googlePodMonitor.scrapeEndpoint }}
+ selector:
+ matchLabels:
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-route.yaml b/charts/jenkins/templates/jenkins-controller-route.yaml
new file mode 100644
index 0000000..3550380
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-route.yaml
@@ -0,0 +1,34 @@
+{{- if .Values.controller.route.enabled }}
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ app: {{ template "jenkins.fullname" . }}
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ component: "{{ .Release.Name }}-{{ .Values.controller.componentName }}"
+{{- if .Values.controller.route.labels }}
+{{ toYaml .Values.controller.route.labels | indent 4 }}
+{{- end }}
+{{- if .Values.controller.route.annotations }}
+ annotations:
+{{ toYaml .Values.controller.route.annotations | indent 4 }}
+{{- end }}
+ name: {{ template "jenkins.fullname" . }}
+spec:
+ host: {{ .Values.controller.route.path }}
+ port:
+ targetPort: http
+ tls:
+ insecureEdgeTerminationPolicy: Redirect
+ termination: edge
+ to:
+ kind: Service
+ name: {{ template "jenkins.fullname" . }}
+ weight: 100
+ wildcardPolicy: None
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-secondary-ingress.yaml b/charts/jenkins/templates/jenkins-controller-secondary-ingress.yaml
new file mode 100644
index 0000000..c63e482
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-secondary-ingress.yaml
@@ -0,0 +1,56 @@
+{{- if .Values.controller.secondaryingress.enabled }}
+{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
+{{- $serviceName := include "jenkins.fullname" . -}}
+{{- $servicePort := .Values.controller.servicePort -}}
+{{- if semverCompare ">=1.19-0" $kubeTargetVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" $kubeTargetVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: {{ .Values.controller.secondaryingress.apiVersion }}
+{{- end }}
+kind: Ingress
+metadata:
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- if .Values.controller.secondaryingress.labels -}}
+ {{ toYaml .Values.controller.secondaryingress.labels | nindent 4 }}
+ {{- end }}
+ {{- if .Values.controller.secondaryingress.annotations }}
+ annotations: {{ toYaml .Values.controller.secondaryingress.annotations | nindent 4 }}
+ {{- end }}
+ name: {{ template "jenkins.fullname" . }}-secondary
+spec:
+{{- if .Values.controller.secondaryingress.ingressClassName }}
+ ingressClassName: {{ .Values.controller.secondaryingress.ingressClassName | quote }}
+{{- end }}
+ rules:
+ - host: {{ .Values.controller.secondaryingress.hostName }}
+ http:
+ paths:
+ {{- range .Values.controller.secondaryingress.paths }}
+ - path: {{ . | quote }}
+ backend:
+{{ if semverCompare ">=1.19-0" $kubeTargetVersion }}
+ service:
+ name: {{ $serviceName }}
+ port:
+ number: {{ $servicePort }}
+ pathType: ImplementationSpecific
+{{ else }}
+ serviceName: {{ $serviceName }}
+ servicePort: {{ $servicePort }}
+{{ end }}
+ {{- end}}
+{{- if .Values.controller.secondaryingress.tls }}
+ tls:
+{{ toYaml .Values.controller.secondaryingress.tls | indent 4 }}
+{{- end -}}
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-servicemonitor.yaml b/charts/jenkins/templates/jenkins-controller-servicemonitor.yaml
new file mode 100644
index 0000000..8710b2b
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-servicemonitor.yaml
@@ -0,0 +1,45 @@
+{{- if and .Values.controller.prometheus.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+
+metadata:
+ name: {{ template "jenkins.fullname" . }}
+{{- if .Values.controller.prometheus.serviceMonitorNamespace }}
+ namespace: {{ .Values.controller.prometheus.serviceMonitorNamespace }}
+{{- else }}
+ namespace: {{ template "jenkins.namespace" . }}
+{{- end }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- range $key, $val := .Values.controller.prometheus.serviceMonitorAdditionalLabels }}
+ {{ $key }}: {{ $val | quote }}
+ {{- end}}
+
+spec:
+ endpoints:
+ - interval: {{ .Values.controller.prometheus.scrapeInterval }}
+ port: http
+ path: {{ .Values.controller.jenkinsUriPrefix }}{{ .Values.controller.prometheus.scrapeEndpoint }}
+ {{- with .Values.controller.prometheus.relabelings }}
+ relabelings:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.controller.prometheus.metricRelabelings }}
+ metricRelabelings:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ jobLabel: {{ template "jenkins.fullname" . }}
+ namespaceSelector:
+ matchNames:
+ - "{{ template "jenkins.namespace" $ }}"
+ selector:
+ matchLabels:
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+{{- end }}
diff --git a/charts/jenkins/templates/jenkins-controller-statefulset.yaml b/charts/jenkins/templates/jenkins-controller-statefulset.yaml
new file mode 100644
index 0000000..ca0edc6
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-statefulset.yaml
@@ -0,0 +1,413 @@
+{{- if .Capabilities.APIVersions.Has "apps/v1" }}
+apiVersion: apps/v1
+{{- else }}
+apiVersion: apps/v1beta1
+{{- end }}
+kind: StatefulSet
+metadata:
+ name: {{ template "jenkins.fullname" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- range $key, $val := .Values.controller.statefulSetLabels }}
+ {{ $key }}: {{ $val | quote }}
+ {{- end}}
+ {{- if .Values.controller.statefulSetAnnotations }}
+ annotations:
+{{ toYaml .Values.controller.statefulSetAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ serviceName: {{ template "jenkins.fullname" . }}
+ replicas: 1
+ selector:
+ matchLabels:
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ {{- if .Values.controller.updateStrategy }}
+ updateStrategy:
+{{ toYaml .Values.controller.updateStrategy | indent 4 }}
+ {{- end }}
+ template:
+ metadata:
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- range $key, $val := .Values.controller.podLabels }}
+ {{ $key }}: {{ $val | quote }}
+ {{- end}}
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
+ {{- if .Values.controller.initScripts }}
+ checksum/config-init-scripts: {{ include (print $.Template.BasePath "/config-init-scripts.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.controller.podAnnotations }}
+{{ tpl (toYaml .Values.controller.podAnnotations | indent 8) . }}
+ {{- end }}
+ spec:
+ {{- if .Values.controller.schedulerName }}
+ schedulerName: {{ .Values.controller.schedulerName }}
+ {{- end }}
+ {{- if .Values.controller.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.controller.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.controller.tolerations }}
+ tolerations:
+{{ toYaml .Values.controller.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.controller.affinity }}
+ affinity:
+{{ toYaml .Values.controller.affinity | indent 8 }}
+ {{- end }}
+ {{- if quote .Values.controller.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if .Values.controller.priorityClassName }}
+ priorityClassName: {{ .Values.controller.priorityClassName }}
+ {{- end }}
+ {{- if .Values.controller.shareProcessNamespace }}
+ shareProcessNamespace: true
+ {{- end }}
+{{- if .Values.controller.usePodSecurityContext }}
+ securityContext:
+ {{- if kindIs "map" .Values.controller.podSecurityContextOverride }}
+ {{- tpl (toYaml .Values.controller.podSecurityContextOverride | nindent 8) . -}}
+ {{- else }}
+      {{/* The rest of this section should be replaced with the contents of this comment once the runAsUser, fsGroup, and securityContextCapabilities Helm chart values have been removed:
+ runAsUser: 1000
+ fsGroup: 1000
+ runAsNonRoot: true
+ */}}
+ runAsUser: {{ default 0 .Values.controller.runAsUser }}
+ {{- if and (.Values.controller.runAsUser) (.Values.controller.fsGroup) }}
+ {{- if not (eq (int .Values.controller.runAsUser) 0) }}
+ fsGroup: {{ .Values.controller.fsGroup }}
+ runAsNonRoot: true
+ {{- end }}
+ {{- if .Values.controller.securityContextCapabilities }}
+ capabilities:
+ {{- toYaml .Values.controller.securityContextCapabilities | nindent 10 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+ serviceAccountName: "{{ template "jenkins.serviceAccountName" . }}"
+{{- if .Values.controller.hostNetworking }}
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+{{- end }}
+ {{- if .Values.controller.hostAliases }}
+ hostAliases:
+ {{- toYaml .Values.controller.hostAliases | nindent 8 }}
+ {{- end }}
+ initContainers:
+{{- if .Values.controller.customInitContainers }}
+{{ tpl (toYaml .Values.controller.customInitContainers) . | indent 8 }}
+{{- end }}
+
+{{- if .Values.controller.sidecars.configAutoReload.enabled }}
+{{- include "jenkins.configReloadContainer" (list $ "config-reload-init" "init") | nindent 8 }}
+{{- end}}
+
+ - name: "init"
+ image: "{{ .Values.controller.image.registry }}/{{ .Values.controller.image.repository }}:{{- include "controller.image.tag" . -}}"
+ imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}"
+ {{- if .Values.controller.containerSecurityContext }}
+ securityContext: {{- toYaml .Values.controller.containerSecurityContext | nindent 12 }}
+ {{- end }}
+ command: [ "sh", "/var/jenkins_config/apply_config.sh" ]
+ {{- if .Values.controller.initContainerEnvFrom }}
+ envFrom:
+{{ (tpl (toYaml .Values.controller.initContainerEnvFrom) .) | indent 12 }}
+ {{- end }}
+ {{- if .Values.controller.initContainerEnv }}
+ env:
+{{ (tpl (toYaml .Values.controller.initContainerEnv) .) | indent 12 }}
+ {{- end }}
+ resources:
+{{- if .Values.controller.initContainerResources }}
+{{ toYaml .Values.controller.initContainerResources | indent 12 }}
+{{- else }}
+{{ toYaml .Values.controller.resources | indent 12 }}
+{{- end }}
+ volumeMounts:
+ {{- if .Values.persistence.mounts }}
+{{ toYaml .Values.persistence.mounts | indent 12 }}
+ {{- end }}
+ - mountPath: {{ .Values.controller.jenkinsHome }}
+ name: jenkins-home
+ {{- if .Values.persistence.subPath }}
+ subPath: {{ .Values.persistence.subPath }}
+ {{- end }}
+ - mountPath: /var/jenkins_config
+ name: jenkins-config
+ {{- if .Values.controller.installPlugins }}
+ {{- if .Values.controller.overwritePluginsFromImage }}
+ - mountPath: {{ .Values.controller.jenkinsRef }}/plugins
+ name: plugins
+ {{- end }}
+ - mountPath: /var/jenkins_plugins
+ name: plugin-dir
+ - mountPath: /tmp
+ name: tmp-volume
+ {{- end }}
+ {{- if or .Values.controller.initScripts .Values.controller.initConfigMap }}
+ - mountPath: {{ .Values.controller.jenkinsHome }}/init.groovy.d
+ name: init-scripts
+ {{- end }}
+ {{- if and .Values.controller.httpsKeyStore.enable (not .Values.controller.httpsKeyStore.disableSecretMount) }}
+ {{- $httpsJKSDirPath := printf "%s" .Values.controller.httpsKeyStore.path }}
+ - mountPath: {{ $httpsJKSDirPath }}
+ name: jenkins-https-keystore
+ {{- end }}
+ containers:
+ - name: jenkins
+ image: "{{ .Values.controller.image.registry }}/{{ .Values.controller.image.repository }}:{{- include "controller.image.tag" . -}}"
+ imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}"
+ {{- if .Values.controller.containerSecurityContext }}
+ securityContext: {{- toYaml .Values.controller.containerSecurityContext | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.overrideArgs }}
+ args: [
+ {{- range $overrideArg := .Values.controller.overrideArgs }}
+ "{{- tpl $overrideArg $ }}",
+ {{- end }}
+ ]
+ {{- else if .Values.controller.httpsKeyStore.enable }}
+ {{- $httpsJKSFilePath := printf "%s/%s" .Values.controller.httpsKeyStore.path .Values.controller.httpsKeyStore.fileName }}
+ args: [ "--httpPort={{.Values.controller.httpsKeyStore.httpPort}}", "--httpsPort={{.Values.controller.targetPort}}", '--httpsKeyStore={{ $httpsJKSFilePath }}', "--httpsKeyStorePassword=$(JENKINS_HTTPS_KEYSTORE_PASSWORD)" ]
+ {{- else }}
+ args: [ "--httpPort={{.Values.controller.targetPort}}"]
+ {{- end }}
+ {{- if .Values.controller.lifecycle }}
+ lifecycle:
+{{ toYaml .Values.controller.lifecycle | indent 12 }}
+ {{- end }}
+{{- if .Values.controller.terminationMessagePath }}
+ terminationMessagePath: {{ .Values.controller.terminationMessagePath }}
+{{- end }}
+{{- if .Values.controller.terminationMessagePolicy }}
+ terminationMessagePolicy: {{ .Values.controller.terminationMessagePolicy }}
+{{- end }}
+ {{- if .Values.controller.containerEnvFrom }}
+ envFrom:
+{{ (tpl ( toYaml .Values.controller.containerEnvFrom) .) | indent 12 }}
+ {{- end }}
+ env:
+ {{- if .Values.controller.containerEnv }}
+{{ (tpl ( toYaml .Values.controller.containerEnv) .) | indent 12 }}
+ {{- end }}
+ {{- if or .Values.controller.additionalSecrets .Values.controller.existingSecret .Values.controller.additionalExistingSecrets .Values.controller.admin.createSecret }}
+ - name: SECRETS
+ value: /run/secrets/additional
+ {{- end }}
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: JAVA_OPTS
+ value: >-
+ {{ if .Values.controller.sidecars.configAutoReload.enabled }} -Dcasc.reload.token=$(POD_NAME) {{ end }}{{ default "" .Values.controller.javaOpts }}
+ - name: JENKINS_OPTS
+ value: >-
+ {{ if .Values.controller.jenkinsUriPrefix }}--prefix={{ .Values.controller.jenkinsUriPrefix }} {{ end }} --webroot=/var/jenkins_cache/war {{ default "" .Values.controller.jenkinsOpts}}
+ - name: JENKINS_SLAVE_AGENT_PORT
+ value: "{{ .Values.controller.agentListenerPort }}"
+ {{- if .Values.controller.httpsKeyStore.enable }}
+ - name: JENKINS_HTTPS_KEYSTORE_PASSWORD
+ {{- if not .Values.controller.httpsKeyStore.disableSecretMount }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretName }} {{ .Values.controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretName }} {{ else if .Values.controller.httpsKeyStore.jenkinsHttpsJksSecretName }} {{ .Values.controller.httpsKeyStore.jenkinsHttpsJksSecretName }} {{ else }} {{ template "jenkins.fullname" . }}-https-jks {{ end }}
+ key: "{{ .Values.controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretKey }}"
+ {{- else }}
+ value: {{ .Values.controller.httpsKeyStore.password }}
+ {{- end }}
+ {{- end }}
+
+ - name: CASC_JENKINS_CONFIG
+ value: {{ .Values.controller.sidecars.configAutoReload.folder | default (printf "%s/casc_configs" (.Values.controller.jenkinsRef)) }}{{- if .Values.controller.JCasC.configUrls }},{{ join "," .Values.controller.JCasC.configUrls }}{{- end }}
+ ports:
+ {{- if .Values.controller.httpsKeyStore.enable }}
+ - containerPort: {{.Values.controller.httpsKeyStore.httpPort}}
+ {{- else }}
+ - containerPort: {{.Values.controller.targetPort}}
+ {{- end }}
+ name: http
+ - containerPort: {{ .Values.controller.agentListenerPort }}
+ name: agent-listener
+ {{- if .Values.controller.agentListenerHostPort }}
+ hostPort: {{ .Values.controller.agentListenerHostPort }}
+ {{- end }}
+ {{- if .Values.controller.jmxPort }}
+ - containerPort: {{ .Values.controller.jmxPort }}
+ name: jmx
+ {{- end }}
+{{- range $index, $port := .Values.controller.extraPorts }}
+ - containerPort: {{ $port.port }}
+ name: {{ $port.name }}
+{{- end }}
+{{- if and .Values.controller.healthProbes .Values.controller.probes}}
+ {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }}
+ startupProbe:
+{{ tpl (toYaml .Values.controller.probes.startupProbe | indent 12) .}}
+ {{- end }}
+ livenessProbe:
+{{ tpl (toYaml .Values.controller.probes.livenessProbe | indent 12) .}}
+ readinessProbe:
+{{ tpl (toYaml .Values.controller.probes.readinessProbe | indent 12) .}}
+{{- end }}
+ resources:
+{{ toYaml .Values.controller.resources | indent 12 }}
+ volumeMounts:
+{{- if .Values.persistence.mounts }}
+{{ toYaml .Values.persistence.mounts | indent 12 }}
+{{- end }}
+ {{- if and .Values.controller.httpsKeyStore.enable (not .Values.controller.httpsKeyStore.disableSecretMount) }}
+ {{- $httpsJKSDirPath := printf "%s" .Values.controller.httpsKeyStore.path }}
+ - mountPath: {{ $httpsJKSDirPath }}
+ name: jenkins-https-keystore
+ {{- end }}
+ - mountPath: {{ .Values.controller.jenkinsHome }}
+ name: jenkins-home
+ readOnly: false
+ {{- if .Values.persistence.subPath }}
+ subPath: {{ .Values.persistence.subPath }}
+ {{- end }}
+ - mountPath: /var/jenkins_config
+ name: jenkins-config
+ readOnly: true
+ {{- if .Values.controller.installPlugins }}
+ - mountPath: {{ .Values.controller.jenkinsRef }}/plugins/
+ name: plugin-dir
+ readOnly: false
+ {{- end }}
+ {{- if or .Values.controller.initScripts .Values.controller.initConfigMap }}
+ - mountPath: {{ .Values.controller.jenkinsHome }}/init.groovy.d
+ name: init-scripts
+ {{- end }}
+ {{- if .Values.controller.sidecars.configAutoReload.enabled }}
+ - name: sc-config-volume
+ mountPath: {{ .Values.controller.sidecars.configAutoReload.folder | default (printf "%s/casc_configs" (.Values.controller.jenkinsRef)) }}
+ {{- end }}
+ {{- if or .Values.controller.additionalSecrets .Values.controller.existingSecret .Values.controller.additionalExistingSecrets .Values.controller.admin.createSecret }}
+ - name: jenkins-secrets
+ mountPath: /run/secrets/additional
+ readOnly: true
+ {{- end }}
+ - name: jenkins-cache
+ mountPath: /var/jenkins_cache
+ - mountPath: /tmp
+ name: tmp-volume
+
+{{- if .Values.controller.sidecars.configAutoReload.enabled }}
+{{- include "jenkins.configReloadContainer" (list $ "config-reload" "sidecar") | nindent 8 }}
+{{- end}}
+
+
+{{- if .Values.controller.sidecars.additionalSidecarContainers}}
+{{ tpl (toYaml .Values.controller.sidecars.additionalSidecarContainers | indent 8) .}}
+{{- end }}
+
+ volumes:
+{{- if .Values.persistence.volumes }}
+{{ tpl (toYaml .Values.persistence.volumes | indent 6) . }}
+{{- end }}
+ {{- if .Values.controller.installPlugins }}
+ {{- if .Values.controller.overwritePluginsFromImage }}
+ - name: plugins
+ emptyDir: {}
+ {{- end }}
+ {{- end }}
+ {{- if and .Values.controller.initScripts .Values.controller.initConfigMap }}
+ - name: init-scripts
+ projected:
+ sources:
+ - configMap:
+ name: {{ template "jenkins.fullname" . }}-init-scripts
+ - configMap:
+ name: {{ .Values.controller.initConfigMap }}
+ {{- else if .Values.controller.initConfigMap }}
+ - name: init-scripts
+ configMap:
+ name: {{ .Values.controller.initConfigMap }}
+ {{- else if .Values.controller.initScripts }}
+ - name: init-scripts
+ configMap:
+ name: {{ template "jenkins.fullname" . }}-init-scripts
+ {{- end }}
+ - name: jenkins-config
+ configMap:
+ name: {{ template "jenkins.fullname" . }}
+ {{- if .Values.controller.installPlugins }}
+ - name: plugin-dir
+ emptyDir: {}
+ {{- end }}
+ {{- if or .Values.controller.additionalSecrets .Values.controller.existingSecret .Values.controller.additionalExistingSecrets .Values.controller.admin.createSecret }}
+ - name: jenkins-secrets
+ projected:
+ sources:
+ {{- if .Values.controller.additionalSecrets }}
+ - secret:
+ name: {{ template "jenkins.fullname" . }}-additional-secrets
+ {{- end }}
+ {{- if .Values.controller.additionalExistingSecrets }}
+ {{- range $key, $value := .Values.controller.additionalExistingSecrets }}
+ - secret:
+ name: {{ tpl $value.name $ }}
+ items:
+ - key: {{ tpl $value.keyName $ }}
+ path: {{ tpl $value.name $ }}-{{ tpl $value.keyName $ }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.admin.createSecret }}
+ - secret:
+ name: {{ .Values.controller.admin.existingSecret | default (include "jenkins.fullname" .) }}
+ items:
+ - key: {{ .Values.controller.admin.userKey | default "jenkins-admin-user" }}
+ path: chart-admin-username
+ - key: {{ .Values.controller.admin.passwordKey | default "jenkins-admin-password" }}
+ path: chart-admin-password
+ {{- end }}
+ {{- if .Values.controller.existingSecret }}
+ - secret:
+ name: {{ .Values.controller.existingSecret }}
+ {{- end }}
+ {{- end }}
+ - name: jenkins-cache
+ emptyDir: {}
+ {{- if not (contains "jenkins-home" (quote .Values.persistence.volumes)) }}
+ - name: jenkins-home
+ {{- if .Values.persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistence.existingClaim | default (include "jenkins.fullname" .) }}
+ {{- else }}
+ emptyDir: {}
+ {{- end -}}
+ {{- end }}
+ - name: sc-config-volume
+ emptyDir: {}
+ - name: tmp-volume
+ emptyDir: {}
+
+ {{- if and .Values.controller.httpsKeyStore.enable (not .Values.controller.httpsKeyStore.disableSecretMount) }}
+ - name: jenkins-https-keystore
+ secret:
+ secretName: {{ if .Values.controller.httpsKeyStore.jenkinsHttpsJksSecretName }} {{ .Values.controller.httpsKeyStore.jenkinsHttpsJksSecretName }} {{ else }} {{ template "jenkins.fullname" . }}-https-jks {{ end }}
+ items:
+ - key: {{ .Values.controller.httpsKeyStore.jenkinsHttpsJksSecretKey }}
+ path: {{ .Values.controller.httpsKeyStore.fileName }}
+ {{- end }}
+
+{{- if .Values.controller.imagePullSecretName }}
+ imagePullSecrets:
+ - name: {{ .Values.controller.imagePullSecretName }}
+{{- end -}}
diff --git a/charts/jenkins/templates/jenkins-controller-svc.yaml b/charts/jenkins/templates/jenkins-controller-svc.yaml
new file mode 100644
index 0000000..a83466c
--- /dev/null
+++ b/charts/jenkins/templates/jenkins-controller-svc.yaml
@@ -0,0 +1,56 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{template "jenkins.fullname" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ {{- if .Values.controller.serviceLabels }}
+{{ toYaml .Values.controller.serviceLabels | indent 4 }}
+ {{- end }}
+{{- if .Values.controller.serviceAnnotations }}
+ annotations:
+{{ toYaml .Values.controller.serviceAnnotations | indent 4 }}
+{{- end }}
+spec:
+ {{- if .Values.controller.serviceExternalTrafficPolicy }}
+ externalTrafficPolicy: {{.Values.controller.serviceExternalTrafficPolicy}}
+ {{- end }}
+ {{- if (and (eq .Values.controller.serviceType "ClusterIP") (not (empty .Values.controller.clusterIP))) }}
+ clusterIP: {{.Values.controller.clusterIP}}
+ {{- end }}
+ ports:
+ - port: {{.Values.controller.servicePort}}
+ name: http
+ targetPort: {{ .Values.controller.targetPort }}
+ {{- if (and (eq .Values.controller.serviceType "NodePort") (not (empty .Values.controller.nodePort))) }}
+ nodePort: {{.Values.controller.nodePort}}
+ {{- end }}
+{{- range $index, $port := .Values.controller.extraPorts }}
+ - port: {{ $port.port }}
+ name: {{ $port.name }}
+ {{- if $port.targetPort }}
+ targetPort: {{ $port.targetPort }}
+ {{- else }}
+ targetPort: {{ $port.port }}
+ {{- end -}}
+{{- end }}
+ selector:
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ type: {{.Values.controller.serviceType}}
+ {{if eq .Values.controller.serviceType "LoadBalancer"}}
+{{- if .Values.controller.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+{{ toYaml .Values.controller.loadBalancerSourceRanges | indent 4 }}
+{{- end }}
+ {{if .Values.controller.loadBalancerIP}}
+ loadBalancerIP: {{.Values.controller.loadBalancerIP}}
+ {{end}}
+ {{end}}
diff --git a/charts/jenkins/templates/rbac.yaml b/charts/jenkins/templates/rbac.yaml
new file mode 100644
index 0000000..581cb8d
--- /dev/null
+++ b/charts/jenkins/templates/rbac.yaml
@@ -0,0 +1,149 @@
+{{ if .Values.rbac.create }}
+{{- $serviceName := include "jenkins.fullname" . -}}
+
+# This role is used to allow Jenkins scheduling of agents via Kubernetes plugin.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ $serviceName }}-schedule-agents
+ namespace: {{ template "jenkins.agent.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+rules:
+- apiGroups: [""]
+ resources: ["pods", "pods/exec", "pods/log", "persistentvolumeclaims", "events"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+ resources: ["pods", "pods/exec", "persistentvolumeclaims"]
+ verbs: ["create", "delete", "deletecollection", "patch", "update"]
+
+---
+
+# We bind the role to the Jenkins service account. The role binding is created in the namespace
+# where the agents are supposed to run.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ $serviceName }}-schedule-agents
+ namespace: {{ template "jenkins.agent.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ $serviceName }}-schedule-agents
+subjects:
+- kind: ServiceAccount
+ name: {{ template "jenkins.serviceAccountName" .}}
+ namespace: {{ template "jenkins.namespace" . }}
+
+---
+
+{{- if .Values.rbac.readSecrets }}
+# This is needed if you want to use https://jenkinsci.github.io/kubernetes-credentials-provider-plugin/
+# as it needs permissions to get/watch/list Secrets
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "jenkins.fullname" . }}-read-secrets
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "watch", "list"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ $serviceName }}-read-secrets
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "jenkins.fullname" . }}-read-secrets
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "jenkins.serviceAccountName" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+
+---
+{{- end}}
+
+{{- if .Values.controller.sidecars.configAutoReload.enabled }}
+# The sidecar container which is responsible for reloading configuration changes
+# needs permissions to watch ConfigMaps
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "jenkins.fullname" . }}-casc-reload
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+rules:
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "watch", "list"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ $serviceName }}-watch-configmaps
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "jenkins.fullname" . }}-casc-reload
+subjects:
+- kind: ServiceAccount
+ name: {{ template "jenkins.serviceAccountName" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+
+{{- end}}
+
+{{ end }}
diff --git a/charts/jenkins/templates/secret-additional.yaml b/charts/jenkins/templates/secret-additional.yaml
new file mode 100644
index 0000000..d1908aa
--- /dev/null
+++ b/charts/jenkins/templates/secret-additional.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.controller.additionalSecrets -}}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "jenkins.fullname" . }}-additional-secrets
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+type: Opaque
+data:
+{{- range .Values.controller.additionalSecrets }}
+ {{ .name }}: {{ .value | b64enc }}
+{{- end }}
+{{- end }}
diff --git a/charts/jenkins/templates/secret-claims.yaml b/charts/jenkins/templates/secret-claims.yaml
new file mode 100644
index 0000000..e8b6d6c
--- /dev/null
+++ b/charts/jenkins/templates/secret-claims.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.controller.secretClaims -}}
+{{- $r := .Release -}}
+{{- $v := .Values -}}
+{{- $chart := printf "%s-%s" .Chart.Name .Chart.Version -}}
+{{- $namespace := include "jenkins.namespace" . -}}
+{{- $serviceName := include "jenkins.fullname" . -}}
+{{ range .Values.controller.secretClaims }}
+---
+kind: SecretClaim
+apiVersion: vaultproject.io/v1
+metadata:
+ name: {{ $serviceName }}-{{ .name | default .path | lower }}
+ namespace: {{ $namespace }}
+ labels:
+ "app.kubernetes.io/name": '{{ $serviceName }}'
+ {{- if $v.renderHelmLabels }}
+ "helm.sh/chart": "{{ $chart }}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ $r.Service }}"
+ "app.kubernetes.io/instance": "{{ $r.Name }}"
+ "app.kubernetes.io/component": "{{ $v.controller.componentName }}"
+spec:
+ type: {{ .type | default "Opaque" }}
+ path: {{ .path }}
+{{- if .renew }}
+ renew: {{ .renew }}
+{{- end }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/jenkins/templates/secret-https-jks.yaml b/charts/jenkins/templates/secret-https-jks.yaml
new file mode 100644
index 0000000..5348de4
--- /dev/null
+++ b/charts/jenkins/templates/secret-https-jks.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.controller.httpsKeyStore.enable ( not .Values.controller.httpsKeyStore.jenkinsHttpsJksSecretName ) (not .Values.controller.httpsKeyStore.disableSecretMount) -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "jenkins.fullname" . }}-https-jks
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+type: Opaque
+data:
+ jenkins-jks-file: |
+{{ .Values.controller.httpsKeyStore.jenkinsKeyStoreBase64Encoded | indent 4 }}
+ https-jks-password: {{ .Values.controller.httpsKeyStore.password | b64enc }}
+{{- end }}
diff --git a/charts/jenkins/templates/secret.yaml b/charts/jenkins/templates/secret.yaml
new file mode 100644
index 0000000..cc6ace1
--- /dev/null
+++ b/charts/jenkins/templates/secret.yaml
@@ -0,0 +1,20 @@
+{{- if and (not .Values.controller.admin.existingSecret) (.Values.controller.admin.createSecret) -}}
+
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "jenkins.fullname" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+type: Opaque
+data:
+ jenkins-admin-password: {{ template "jenkins.password" . }}
+ jenkins-admin-user: {{ .Values.controller.admin.username | b64enc | quote }}
+{{- end }}
diff --git a/charts/jenkins/templates/service-account-agent.yaml b/charts/jenkins/templates/service-account-agent.yaml
new file mode 100644
index 0000000..48f08ba
--- /dev/null
+++ b/charts/jenkins/templates/service-account-agent.yaml
@@ -0,0 +1,26 @@
+{{ if .Values.serviceAccountAgent.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jenkins.serviceAccountAgentName" . }}
+ namespace: {{ template "jenkins.agent.namespace" . }}
+{{- if .Values.serviceAccountAgent.annotations }}
+ annotations:
+{{ tpl (toYaml .Values.serviceAccountAgent.annotations) . | indent 4 }}
+{{- end }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+{{- if .Values.serviceAccountAgent.extraLabels }}
+{{ tpl (toYaml .Values.serviceAccountAgent.extraLabels) . | indent 4 }}
+{{- end }}
+{{- if .Values.serviceAccountAgent.imagePullSecretName }}
+imagePullSecrets:
+ - name: {{ .Values.serviceAccountAgent.imagePullSecretName }}
+{{- end -}}
+{{ end }}
diff --git a/charts/jenkins/templates/service-account.yaml b/charts/jenkins/templates/service-account.yaml
new file mode 100644
index 0000000..b44eb48
--- /dev/null
+++ b/charts/jenkins/templates/service-account.yaml
@@ -0,0 +1,26 @@
+{{ if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jenkins.serviceAccountName" . }}
+ namespace: {{ template "jenkins.namespace" . }}
+{{- if .Values.serviceAccount.annotations }}
+ annotations:
+{{ tpl (toYaml .Values.serviceAccount.annotations) . | indent 4 }}
+{{- end }}
+ labels:
+ "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
+ {{- if .Values.renderHelmLabels }}
+ "helm.sh/chart": "{{ template "jenkins.label" .}}"
+ {{- end }}
+ "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
+ "app.kubernetes.io/instance": "{{ .Release.Name }}"
+ "app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
+{{- if .Values.serviceAccount.extraLabels }}
+{{ tpl (toYaml .Values.serviceAccount.extraLabels) . | indent 4 }}
+{{- end }}
+{{- if .Values.serviceAccount.imagePullSecretName }}
+imagePullSecrets:
+ - name: {{ .Values.serviceAccount.imagePullSecretName }}
+{{- end -}}
+{{ end }}
diff --git a/charts/jenkins/templates/tests/jenkins-test.yaml b/charts/jenkins/templates/tests/jenkins-test.yaml
new file mode 100644
index 0000000..12a935e
--- /dev/null
+++ b/charts/jenkins/templates/tests/jenkins-test.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.controller.testEnabled }}
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ .Release.Name }}-ui-test-{{ randAlphaNum 5 | lower }}"
+ namespace: {{ template "jenkins.namespace" . }}
+ annotations:
+ "helm.sh/hook": test-success
+spec:
+ {{- if .Values.controller.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.controller.nodeSelector | indent 4 }}
+ {{- end }}
+ {{- if .Values.controller.tolerations }}
+ tolerations:
+{{ toYaml .Values.controller.tolerations | indent 4 }}
+ {{- end }}
+ initContainers:
+ - name: "test-framework"
+ image: "{{ .Values.helmtest.bats.image.registry }}/{{ .Values.helmtest.bats.image.repository }}:{{ .Values.helmtest.bats.image.tag }}"
+ command:
+ - "bash"
+ - "-c"
+ args:
+ - |
+ # copy bats to tools dir
+ set -ex
+ cp -R /opt/bats /tools/bats/
+ volumeMounts:
+ - mountPath: /tools
+ name: tools
+ containers:
+ - name: {{ .Release.Name }}-ui-test
+ image: "{{ .Values.controller.image.registry }}/{{ .Values.controller.image.repository }}:{{- include "controller.image.tag" . -}}"
+ command: ["/tools/bats/bin/bats", "-t", "/tests/run.sh"]
+ volumeMounts:
+ - mountPath: /tests
+ name: tests
+ readOnly: true
+ - mountPath: /tools
+ name: tools
+ volumes:
+ - name: tests
+ configMap:
+ name: {{ template "jenkins.fullname" . }}-tests
+ - name: tools
+ emptyDir: {}
+ restartPolicy: Never
+{{- end }}
diff --git a/charts/jenkins/templates/tests/test-config.yaml b/charts/jenkins/templates/tests/test-config.yaml
new file mode 100644
index 0000000..12c5b3a
--- /dev/null
+++ b/charts/jenkins/templates/tests/test-config.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.controller.testEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "jenkins.fullname" . }}-tests
+ namespace: {{ template "jenkins.namespace" . }}
+ annotations:
+ "helm.sh/hook": test
+data:
+ run.sh: |-
+ @test "Testing Jenkins UI is accessible" {
+ curl --retry 48 --retry-delay 10 {{ template "jenkins.fullname" . }}:{{ .Values.controller.servicePort }}{{ default "" .Values.controller.jenkinsUriPrefix }}/login
+ }
+{{- end }}
diff --git a/charts/jenkins/values.yaml b/charts/jenkins/values.yaml
new file mode 100644
index 0000000..754a01c
--- /dev/null
+++ b/charts/jenkins/values.yaml
@@ -0,0 +1,1306 @@
+# Default values for jenkins.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+## Overrides for generated resource names
+# See templates/_helpers.tpl
+# -- Override the resource name prefix
+# @default -- `Chart.Name`
+nameOverride:
+# -- Override the full resource names
+# @default -- `jenkins-(release-name)` or `jenkins` if the release-name is `jenkins`
+fullnameOverride:
+# -- Override the deployment namespace
+# @default -- `Release.Namespace`
+namespaceOverride:
+
+# For FQDN resolving of the controller service. Change this value to match your existing configuration.
+# ref: https://github.com/kubernetes/dns/blob/master/docs/specification.md
+# -- Override the cluster name for FQDN resolving
+clusterZone: "cluster.local"
+
+# -- The URL of the Kubernetes API server
+kubernetesURL: "https://kubernetes.default"
+
+# -- The Jenkins credentials to access the Kubernetes API server. For the default cluster it is not needed.
+credentialsId:
+
+# -- Enables rendering of the helm.sh/chart label to the annotations
+renderHelmLabels: true
+
+controller:
+ # -- Used for label app.kubernetes.io/component
+ componentName: "jenkins-controller"
+ image:
+ # -- Controller image registry
+ registry: "docker.io"
+ # -- Controller image repository
+ repository: "jenkins/jenkins"
+
+ # -- Controller image tag override; i.e., tag: "2.440.1-jdk17"
+ tag:
+
+ # -- Controller image tag label
+ tagLabel: jdk17
+ # -- Controller image pull policy
+ pullPolicy: "Always"
+ # -- Controller image pull secret
+ imagePullSecretName:
+ # -- Lifecycle specification for controller-container
+ lifecycle: {}
+ # postStart:
+ # exec:
+ # command:
+ # - "uname"
+ # - "-a"
+
+ # -- Disable use of remember me
+ disableRememberMe: false
+
+ # -- Set Number of executors
+ numExecutors: 0
+
+ # -- Sets the executor mode of the Jenkins node. Possible values are "NORMAL" or "EXCLUSIVE"
+ executorMode: "NORMAL"
+
+ # -- Append Jenkins labels to the controller
+ customJenkinsLabels: []
+
+ hostNetworking: false
+
+ # When enabling LDAP or another non-Jenkins identity source, the built-in admin account will no longer exist.
+ # If you disable the non-Jenkins identity store and instead use the Jenkins internal one,
+ # you should revert controller.admin.username to your preferred admin user:
+ admin:
+
+ # -- Admin username created as a secret if `controller.admin.createSecret` is true
+ username: "admin"
+ # -- Admin password created as a secret if `controller.admin.createSecret` is true
+ # @default -- <random password>
+ password:
+
+ # -- The key in the existing admin secret containing the username
+ userKey: jenkins-admin-user
+ # -- The key in the existing admin secret containing the password
+ passwordKey: jenkins-admin-password
+
+ # The default configuration uses this secret to configure an admin user
+ # If you don't need that user or use a different security realm, then you can disable it
+ # -- Create secret for admin user
+ createSecret: true
+
+ # -- The name of an existing secret containing the admin credentials
+ existingSecret: ""
+ # -- Email address for the administrator of the Jenkins instance
+ jenkinsAdminEmail:
+
+ # This value should not be changed unless you use your custom image of jenkins or any derived from.
+ # If you want to use Cloudbees Jenkins Distribution docker, you should set jenkinsHome: "/var/cloudbees-jenkins-distribution"
+ # -- Custom Jenkins home path
+ jenkinsHome: "/var/jenkins_home"
+
+ # This value should not be changed unless you use your custom image of jenkins or any derived from.
+ # If you want to use Cloudbees Jenkins Distribution docker, you should set jenkinsRef: "/usr/share/cloudbees-jenkins-distribution/ref"
+ # -- Custom Jenkins reference path
+ jenkinsRef: "/usr/share/jenkins/ref"
+
+ # Path to the jenkins war file which is used by jenkins-plugin-cli.
+ jenkinsWar: "/usr/share/jenkins/jenkins.war"
+ # Override the default arguments passed to the war
+ # overrideArgs:
+ # - --httpPort=8080
+
+ # -- Resource allocation (Requests and Limits)
+ resources:
+ requests:
+ cpu: "50m"
+ memory: "256Mi"
+ limits:
+ cpu: "2000m"
+ memory: "4096Mi"
+
+ # Share process namespace to allow sidecar containers to interact with processes in other containers in the same pod
+ shareProcessNamespace: false
+
+ # Overrides the init container default values
+ # -- Resources allocation (Requests and Limits) for Init Container
+ initContainerResources: {}
+ # initContainerResources:
+ # requests:
+ # cpu: "50m"
+ # memory: "256Mi"
+ # limits:
+ # cpu: "2000m"
+ # memory: "4096Mi"
+ # -- Environment variable sources for Init Container
+ initContainerEnvFrom: []
+
+ # useful, e.g., for http_proxy
+ # -- Environment variables for Init Container
+ initContainerEnv: []
+ # initContainerEnv:
+ # - name: http_proxy
+ # value: "http://192.168.64.1:3128"
+
+ # -- Environment variable sources for Jenkins Container
+ containerEnvFrom: []
+
+ # -- Environment variables for Jenkins Container
+ containerEnv: []
+ # - name: http_proxy
+ # value: "http://192.168.64.1:3128"
+
+ # Set min/max heap here if needed with "-Xms512m -Xmx512m"
+ # -- Append to `JAVA_OPTS` env var
+ javaOpts:
+ # -- Append to `JENKINS_OPTS` env var
+ jenkinsOpts:
+
+ # If you are using the ingress definitions provided by this chart via the `controller.ingress` block,
+ # the configured hostname will be the ingress hostname starting with `https://`
+ # or `http://` depending on the `tls` configuration.
+ # The Protocol can be overwritten by specifying `controller.jenkinsUrlProtocol`.
+ # -- Set protocol for Jenkins URL; `https` if `controller.ingress.tls`, `http` otherwise
+ jenkinsUrlProtocol:
+
+ # -- Set Jenkins URL if you are not using the ingress definitions provided by the chart
+ jenkinsUrl:
+
+ # If you set this prefix and use ingress controller, then you might want to set the ingress path below
+ # I.e., "/jenkins"
+ # -- Root URI Jenkins will be served on
+ jenkinsUriPrefix:
+
+ # -- Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set)
+ usePodSecurityContext: true
+
+ # Note that `runAsUser`, `fsGroup`, and `securityContextCapabilities` are
+ # being deprecated and replaced by `podSecurityContextOverride`.
+ # Set runAsUser to 1000 to let Jenkins run as non-root user 'jenkins', which exists in 'jenkins/jenkins' docker image.
+ # When configuring runAsUser to a different value than 0 also set fsGroup to the same value:
+ # -- Deprecated in favor of `controller.podSecurityContextOverride`. uid that jenkins runs with.
+ runAsUser: 1000
+
+ # -- Deprecated in favor of `controller.podSecurityContextOverride`. uid that will be used for persistent volume.
+ fsGroup: 1000
+
+ # If you have PodSecurityPolicies that require dropping of capabilities as suggested by CIS K8s benchmark, put them here
+ # securityContextCapabilities:
+ # drop:
+ # - NET_RAW
+ securityContextCapabilities: {}
+
+ # In the case of mounting an ext4 filesystem, it might be desirable to use `supplementalGroups` instead of `fsGroup` in
+ # the `securityContext` block: https://github.com/kubernetes/kubernetes/issues/67014#issuecomment-589915496
+ # podSecurityContextOverride:
+ # runAsUser: 1000
+ # runAsNonRoot: true
+ # supplementalGroups: [1000]
+ # capabilities: {}
+ # -- Completely overwrites the contents of the pod security context, ignoring the values provided for `runAsUser`, `fsGroup`, and `securityContextCapabilities`
+ podSecurityContextOverride: ~
+
+ # -- Allow controlling the securityContext for the jenkins container
+ containerSecurityContext:
+ runAsUser: 1000
+ runAsGroup: 1000
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+
+ # For minikube, set this to NodePort, elsewhere uses LoadBalancer
+ # Use ClusterIP if your setup includes ingress controller
+ # -- k8s service type
+ serviceType: ClusterIP
+
+ # -- k8s service clusterIP. Only used if serviceType is ClusterIP
+ clusterIP:
+ # -- k8s service port
+ servicePort: 8080
+ # -- k8s target port
+ targetPort: 8080
+ # -- k8s node port. Only used if serviceType is NodePort
+ nodePort:
+
+ # Use Local to preserve the client source IP and avoids a second hop for LoadBalancer and NodePort type services,
+ # but risks potentially imbalanced traffic spreading.
+ serviceExternalTrafficPolicy:
+
+ # -- Jenkins controller service annotations
+ serviceAnnotations: {}
+ # -- Jenkins controller custom labels for the StatefulSet
+ statefulSetLabels: {}
+ # foo: bar
+ # bar: foo
+ # -- Labels for the Jenkins controller-service
+ serviceLabels: {}
+ # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https
+
+ # Put labels on Jenkins controller pod
+ # -- Custom Pod labels (an object with `label-key: label-value` pairs)
+ podLabels: {}
+
+ # Enable Kubernetes Startup, Liveness and Readiness Probes
+ # if Startup Probe is supported, enable it too
+ # ~ 2 minutes to allow Jenkins to restart when upgrading plugins. Set ReadinessTimeout to be shorter than LivenessTimeout.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes
+ # -- Enable Kubernetes Probes configuration configured in `controller.probes`
+ healthProbes: true
+
+ probes:
+ startupProbe:
+ # -- Set the failure threshold for the startup probe
+ failureThreshold: 12
+ httpGet:
+ # -- Set the Pod's HTTP path for the startup probe
+ path: '{{ default "" .Values.controller.jenkinsUriPrefix }}/login'
+ # -- Set the Pod's HTTP port to use for the startup probe
+ port: http
+ # -- Set the time interval between two startup probes executions in seconds
+ periodSeconds: 10
+ # -- Set the timeout for the startup probe in seconds
+ timeoutSeconds: 5
+
+ livenessProbe:
+ # -- Set the failure threshold for the liveness probe
+ failureThreshold: 5
+ httpGet:
+ # -- Set the Pod's HTTP path for the liveness probe
+ path: '{{ default "" .Values.controller.jenkinsUriPrefix }}/login'
+ # -- Set the Pod's HTTP port to use for the liveness probe
+ port: http
+ # -- Set the time interval between two liveness probes executions in seconds
+ periodSeconds: 10
+ # -- Set the timeout for the liveness probe in seconds
+ timeoutSeconds: 5
+
+ # If Startup Probe is not supported on your Kubernetes cluster, you might want to use "initialDelaySeconds" instead.
+ # It delays the initial liveness probe while Jenkins is starting
+ # -- Set the initial delay for the liveness probe in seconds
+ initialDelaySeconds:
+
+ readinessProbe:
+ # -- Set the failure threshold for the readiness probe
+ failureThreshold: 3
+ httpGet:
+ # -- Set the Pod's HTTP path for the liveness probe
+ path: '{{ default "" .Values.controller.jenkinsUriPrefix }}/login'
+ # -- Set the Pod's HTTP port to use for the readiness probe
+ port: http
+ # -- Set the time interval between two readiness probes executions in seconds
+ periodSeconds: 10
+ # -- Set the timeout for the readiness probe in seconds
+ timeoutSeconds: 5
+
+ # If Startup Probe is not supported on your Kubernetes cluster, you might want to use "initialDelaySeconds" instead.
+ # It delays the initial readiness probe while Jenkins is starting
+ # -- Set the initial delay for the readiness probe in seconds
+ initialDelaySeconds:
+
+ # PodDisruptionBudget config
+ podDisruptionBudget:
+ # ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+
+ # -- Enable Kubernetes Pod Disruption Budget configuration
+ enabled: false
+
+ # For Kubernetes v1.5+, use 'policy/v1beta1'
+ # For Kubernetes v1.21+, use 'policy/v1'
+ # -- Policy API version
+ apiVersion: "policy/v1beta1"
+
+ annotations: {}
+ labels: {}
+ # -- Number of pods that can be unavailable. Either an absolute number or a percentage
+ maxUnavailable: "0"
+
+ # -- Create Agent listener service
+ agentListenerEnabled: true
+ # -- Listening port for agents
+ agentListenerPort: 50000
+ # -- Host port to listen for agents
+ agentListenerHostPort:
+ # -- Node port to listen for agents
+ agentListenerNodePort:
+
+ # ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies
+ # -- Traffic Policy for the agentListener service
+ agentListenerExternalTrafficPolicy:
+ # -- Allowed inbound IP for the agentListener service
+ agentListenerLoadBalancerSourceRanges:
+ - 0.0.0.0/0
+ # -- Disabled agent protocols
+ disabledAgentProtocols:
+ - JNLP-connect
+ - JNLP2-connect
+ csrf:
+ defaultCrumbIssuer:
+ # -- Enable the default CSRF Crumb issuer
+ enabled: true
+ # -- Enable proxy compatibility
+ proxyCompatability: true
+
+ # Kubernetes service type for the JNLP agent service
+ # agentListenerServiceType is the Kubernetes Service type for the JNLP agent service,
+ # either 'LoadBalancer', 'NodePort', or 'ClusterIP'
+ # Note if you set this to 'LoadBalancer', you *must* define annotations to secure it. By default,
+ # this will be an external load balancer and allowing inbound 0.0.0.0/0, a HUGE
+ # security risk: https://github.com/kubernetes/charts/issues/1341
+ # -- Defines how to expose the agentListener service
+ agentListenerServiceType: "ClusterIP"
+
+ # -- Annotations for the agentListener service
+ agentListenerServiceAnnotations: {}
+
+ # Optionally, assign an IP to the LoadBalancer agentListenerService LoadBalancer
+ # GKE users: only regional static IPs will work for Service Load balancer.
+ # -- Static IP for the agentListener LoadBalancer
+ agentListenerLoadBalancerIP:
+
+ # -- Whether legacy remoting security should be enabled
+ legacyRemotingSecurityEnabled: false
+
+ # Example of a 'LoadBalancer'-type agent listener with annotations securing it
+ # agentListenerServiceType: LoadBalancer
+ # agentListenerServiceAnnotations:
+ # service.beta.kubernetes.io/aws-load-balancer-internal: "True"
+ # service.beta.kubernetes.io/load-balancer-source-ranges: "172.0.0.0/8, 10.0.0.0/8"
+
+ # LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to
+ # set allowed inbound rules on the security group assigned to the controller load balancer
+ # -- Allowed inbound IP addresses
+ loadBalancerSourceRanges:
+ - 0.0.0.0/0
+
+ # -- Optionally assign a known public LB IP
+ loadBalancerIP:
+
+ # Optionally configure a JMX port. This requires additional javaOpts, for example,
+ # javaOpts: >
+ # -Dcom.sun.management.jmxremote.port=4000
+ # -Dcom.sun.management.jmxremote.authenticate=false
+ # -Dcom.sun.management.jmxremote.ssl=false
+ # jmxPort: 4000
+ # -- Open a port, for JMX stats
+ jmxPort:
+
+ # -- Optionally configure other ports to expose in the controller container
+ extraPorts: []
+ # - name: BuildInfoProxy
+ # port: 9000
+ # targetPort: 9010 (Optional: Use to explicitly set targetPort if different from port)
+
+ # Plugins will be installed during Jenkins controller start
+ # -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
+ installPlugins:
+ - kubernetes:4203.v1dd44f5b_1cf9
+ - workflow-aggregator:596.v8c21c963d92d
+ - git:5.2.1
+ - configuration-as-code:1810.v9b_c30a_249a_4c
+
+ # If set to false, Jenkins will download the minimum required version of all dependencies.
+ # -- Download the minimum required version or latest version of all dependencies
+ installLatestPlugins: true
+
+ # -- Set to true to download the latest version of any plugin that is requested to have the latest version
+ installLatestSpecifiedPlugins: false
+
+ # -- List of plugins to install in addition to those listed in controller.installPlugins
+ additionalPlugins: []
+
+ # Without this, whenever the controller gets restarted (evicted, etc.) it will fetch plugin updates that have the potential to cause breakage.
+ # Note that for this to work, `persistence.enabled` needs to be set to `true`
+ # -- Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true`
+ initializeOnce: false
+
+ # Enable to always override the installed plugins with the values of 'controller.installPlugins' on upgrade or redeployment.
+ # -- Overwrite installed plugins on start
+ overwritePlugins: false
+
+ # Configures if plugins bundled with `controller.image` should be overwritten with the values of 'controller.installPlugins' on upgrade or redeployment.
+ # -- Overwrite plugins that are already installed in the controller image
+ overwritePluginsFromImage: true
+
+ # Configures the restrictions for naming projects. Set this key to null or empty to skip it in the default config.
+ projectNamingStrategy: standard
+
+ # Useful with ghprb plugin. The OWASP plugin is not installed by default, please update controller.installPlugins.
+ # -- Enable HTML parsing using OWASP Markup Formatter Plugin (antisamy-markup-formatter)
+ enableRawHtmlMarkupFormatter: false
+
+ # This is ignored if enableRawHtmlMarkupFormatter is true
+ # -- Yaml of the markup formatter to use
+ markupFormatter: plainText
+
+ # Used to approve a list of groovy functions in pipelines using the script-security plugin. Can be viewed under /scriptApproval
+ # -- List of groovy functions to approve
+ scriptApproval: []
+ # - "method groovy.json.JsonSlurperClassic parseText java.lang.String"
+ # - "new groovy.json.JsonSlurperClassic"
+
+ # -- Map of groovy init scripts to be executed during Jenkins controller start
+ initScripts: {}
+ # test: |-
+ # print 'adding global pipeline libraries, register properties, bootstrap jobs...'
+ # -- Name of the existing ConfigMap that contains init scripts
+ initConfigMap:
+
+ # 'name' is a name of an existing secret in the same namespace as jenkins,
+ # 'keyName' is the name of one of the keys inside the current secret.
+ # the 'name' and 'keyName' are concatenated with a '-' in between, so for example:
+ # an existing secret "secret-credentials" and a key inside it named "github-password" should be used in JCasC as ${secret-credentials-github-password}
+ # 'name' and 'keyName' must conform to RFC 1123: a label must consist of lower case alphanumeric characters or '-',
+ # and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc')
+ # existingSecret existing secret "secret-credentials" and a key inside it named "github-username" should be used in JCasC as ${github-username}
+ # When using existingSecret no need to specify the keyName under additionalExistingSecrets.
+ existingSecret:
+
+ # -- List of additional existing secrets to mount
+ additionalExistingSecrets: []
+ # ref: https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/docs/features/secrets.adoc#kubernetes-secrets
+ # additionalExistingSecrets:
+ # - name: secret-name-1
+ # keyName: username
+ # - name: secret-name-1
+ # keyName: password
+
+ # -- List of additional secrets to create and mount
+ additionalSecrets: []
+ # ref: https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/docs/features/secrets.adoc#kubernetes-secrets
+ # additionalSecrets:
+ # - name: nameOfSecret
+ # value: secretText
+
+ # Generate SecretClaim resources to create Kubernetes secrets from HashiCorp Vault using kube-vault-controller.
+ # 'name' is the name of the secret that will be created in Kubernetes. The Jenkins fullname is prepended to this value.
+ # 'path' is the fully qualified path to the secret in Vault
+ # 'type' is an optional Kubernetes secret type. The default is 'Opaque'
+ # 'renew' is an optional secret renewal time in seconds
+ # -- List of `SecretClaim` resources to create
+ secretClaims: []
+ # - name: secretName # required
+ # path: testPath # required
+ # type: kubernetes.io/tls # optional
+ # renew: 60 # optional
+
+ # -- Name of default cloud configuration.
+ cloudName: "kubernetes"
+
+ # Below is the implementation of Jenkins Configuration as Code. Add a key under configScripts for each configuration area,
+ # where each corresponds to a plugin or section of the UI. Each key (prior to | character) is just a label, and can be any value.
+ # Keys are only used to give the section a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label
+ # characters: lowercase letters, numbers, and hyphens. The keys become the name of a configuration yaml file on the controller in
+ # /var/jenkins_home/casc_configs (by default) and will be processed by the Configuration as Code Plugin. The lines after each |
+ # become the content of the configuration yaml file. The first line after this is a JCasC root element, e.g., jenkins, credentials,
+ # etc. Best reference is https://<jenkins_url>/configuration-as-code/reference. The example below creates a welcome message:
+ JCasC:
+ # -- Enables default Jenkins configuration via configuration as code plugin
+ defaultConfig: true
+
+ # If true, the init container deletes all the plugin config files and Jenkins Config as Code overwrites any existing configuration
+ # -- Whether Jenkins Config as Code should overwrite any existing configuration
+ overwriteConfiguration: false
+ # -- Remote URLs for configuration files.
+ configUrls: []
+ # - https://acme.org/jenkins.yaml
+ # -- List of Jenkins Config as Code scripts
+ configScripts: {}
+ # welcome-message: |
+ # jenkins:
+ # systemMessage: Welcome to our CI\CD server. This Jenkins is configured and managed 'as code'.
+
+ # Allows adding to the top-level security JCasC section. For legacy purposes, by default, the chart includes apiToken configurations
+ # -- Jenkins Config as Code security-section
+ security:
+ apiToken:
+ creationOfLegacyTokenEnabled: false
+ tokenGenerationOnCreationEnabled: false
+ usageStatisticsEnabled: true
+
+ # Ignored if securityRealm is defined in controller.JCasC.configScripts
+ # -- Jenkins Config as Code Security Realm-section
+ securityRealm: |-
+ local:
+ allowsSignup: false
+ enableCaptcha: false
+ users:
+ - id: "${chart-admin-username}"
+ name: "Jenkins Admin"
+ password: "${chart-admin-password}"
+
+ # Ignored if authorizationStrategy is defined in controller.JCasC.configScripts
+ # -- Jenkins Config as Code Authorization Strategy-section
+ authorizationStrategy: |-
+ loggedInUsersCanDoAnything:
+ allowAnonymousRead: false
+ # -- Custom init-container specification in raw-yaml format
+ customInitContainers: []
+ # - name: custom-init
+ # image: "alpine:3"
+ # imagePullPolicy: Always
+ # command: [ "uname", "-a" ]
+
+ sidecars:
+ configAutoReload:
+ # If enabled: true, Jenkins Configuration as Code will be reloaded on-the-fly without a reboot.
+ # If false or not-specified, JCasC changes will cause a reboot and will only be applied at the subsequent start-up.
+ # Auto-reload uses the http://<jenkins_url>/reload-configuration-as-code endpoint to reapply config when changes to
+ # the configScripts are detected.
+ # -- Enables Jenkins Config as Code auto-reload
+ enabled: true
+ image:
+ # -- Registry for the image that triggers the reload
+ registry: docker.io
+ # -- Repository of the image that triggers the reload
+ repository: kiwigrid/k8s-sidecar
+ # -- Tag for the image that triggers the reload
+ tag: 1.26.1
+ imagePullPolicy: IfNotPresent
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+ # requests:
+ # cpu: 50m
+ # memory: 50Mi
+
+ # -- The scheme to use when connecting to the Jenkins configuration as code endpoint
+ scheme: http
+ # -- Skip TLS verification when connecting to the Jenkins configuration as code endpoint
+ skipTlsVerify: false
+
+ # -- How many connection-related errors to retry on
+ reqRetryConnect: 10
+ # -- How many seconds to wait before updating config-maps/secrets (sets METHOD=SLEEP on the sidecar)
+ sleepTime:
+
+ # -- Environment variable sources for the Jenkins Config as Code auto-reload container
+ envFrom: []
+ # -- Environment variables for the Jenkins Config as Code auto-reload container
+ env: {}
+ # - name: REQ_TIMEOUT
+ # value: "30"
+
+ # SSH port value can be set to any unused TCP port. The default, 1044, is a non-standard SSH port that has been chosen at random.
+ # This is only used to reload JCasC config from the sidecar container running in the Jenkins controller pod.
+ # This TCP port will not be open in the pod (unless you specifically configure this), so Jenkins will not be
+ # accessible via SSH from outside the pod. Note if you use non-root pod privileges (runAsUser & fsGroup),
+ # this must be > 1024:
+ sshTcpPort: 1044
+ # folder in the pod that should hold the collected JCasC configuration files:
+ folder: "/var/jenkins_home/casc_configs"
+
+ # If specified, the sidecar will search for JCasC config-maps inside this namespace.
+ # Otherwise, the namespace in which the sidecar is running will be used.
+ # It's also possible to specify ALL to search in all namespaces:
+ # searchNamespace:
+ # -- Enable container security context
+ containerSecurityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+
+ # -- Configures additional sidecar container(s) for the Jenkins controller
+ additionalSidecarContainers: []
+ ## The example below runs the client for https://smee.io as sidecar container next to Jenkins,
+ ## that allows triggering build behind a secure firewall.
+ ## https://jenkins.io/blog/2019/01/07/webhook-firewalls/#triggering-builds-with-webhooks-behind-a-secure-firewall
+ ##
+ ## Note: To use it you should go to https://smee.io/new and update the url to the generated one.
+ # - name: smee
+ # image: docker.io/twalter/smee-client:1.0.2
+ # args: ["--port", "{{ .Values.controller.servicePort }}", "--path", "/github-webhook/", "--url", "https://smee.io/new"]
+ # resources:
+ # limits:
+ # cpu: 50m
+ # memory: 128Mi
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+ # -- Name of the Kubernetes scheduler to use
+ schedulerName: ""
+
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ # -- Node labels for pod assignment
+ nodeSelector: {}
+
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ # -- Toleration labels for pod assignment
+ tolerations: []
+ # -- Set TerminationGracePeriodSeconds
+ terminationGracePeriodSeconds:
+ # -- Set the termination message path
+ terminationMessagePath:
+ # -- Set the termination message policy
+ terminationMessagePolicy:
+
+ # -- Affinity settings
+ affinity: {}
+
+ # Leverage a priorityClass to ensure your pods survive resource shortages
+ # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+ # -- The name of a `priorityClass` to apply to the controller pod
+ priorityClassName:
+
+ # -- Annotations for controller pod
+ podAnnotations: {}
+ # -- Annotations for controller StatefulSet
+ statefulSetAnnotations: {}
+
+ # ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ # -- Update strategy for StatefulSet
+ updateStrategy: {}
+
+ ingress:
+ # -- Enables ingress
+ enabled: false
+
+ # Override for the default paths that map requests to the backend
+ # -- Override for the default Ingress paths
+ paths: []
+ # - backend:
+ # serviceName: ssl-redirect
+ # servicePort: use-annotation
+ # - backend:
+ # serviceName: >-
+ # {{ template "jenkins.fullname" . }}
+ # # Don't use a string here; use only an integer value!
+ # servicePort: 8080
+
+ # For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1'
+ # For Kubernetes v1.19+, use 'networking.k8s.io/v1'
+ # -- Ingress API version
+ apiVersion: "extensions/v1beta1"
+ # -- Ingress labels
+ labels: {}
+ # -- Ingress annotations
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+ # ingressClassName: nginx
+
+ # Set this path to jenkinsUriPrefix above or use annotations to rewrite path
+ # -- Ingress path
+ path:
+
+ # configures the hostname e.g. jenkins.example.com
+ # -- Ingress hostname
+ hostName:
+ # -- Hostname to serve assets from
+ resourceRootUrl:
+ # -- Ingress TLS configuration
+ tls: []
+ # - secretName: jenkins.cluster.local
+ # hosts:
+ # - jenkins.cluster.local
+
+ # often you want to have your controller all locked down and private,
+ # but you still want to get webhooks from your SCM
+ # A secondary ingress will let you expose different urls
+ # with a different configuration
+ secondaryingress:
+ enabled: false
+ # paths you want forwarded to the backend
+ # ex /github-webhook
+ paths: []
+ # For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1'
+ # For Kubernetes v1.19+, use 'networking.k8s.io/v1'
+ apiVersion: "extensions/v1beta1"
+ labels: {}
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+ # ingressClassName: nginx
+ # configures the hostname e.g., jenkins-external.example.com
+ hostName:
+ tls:
+ # - secretName: jenkins-external.example.com
+ # hosts:
+ # - jenkins-external.example.com
+
+ # If you're running on GKE and need to configure a backendconfig
+ # to finish ingress setup, use the following values.
+ # Docs: https://cloud.google.com/kubernetes-engine/docs/concepts/backendconfig
+ backendconfig:
+ # -- Enables backendconfig
+ enabled: false
+ # -- backendconfig API version
+ apiVersion: "extensions/v1beta1"
+ # -- backendconfig name
+ name:
+ # -- backendconfig labels
+ labels: {}
+ # -- backendconfig annotations
+ annotations: {}
+ # -- backendconfig spec
+ spec: {}
+
+ # Openshift route
+ route:
+ # -- Enables openshift route
+ enabled: false
+ # -- Route labels
+ labels: {}
+ # -- Route annotations
+ annotations: {}
+ # -- Route path
+ path:
+
+ # -- Allows for adding entries to Pod /etc/hosts
+ hostAliases: []
+ # ref: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ # hostAliases:
+ # - ip: 192.168.50.50
+ # hostnames:
+ # - something.local
+ # - ip: 10.0.50.50
+ # hostnames:
+ # - other.local
+
+ # Expose Prometheus metrics
+ prometheus:
+ # If enabled, add the prometheus plugin to the list of plugins to install
+ # https://plugins.jenkins.io/prometheus
+
+ # -- Enables prometheus service monitor
+ enabled: false
+ # -- Additional labels to add to the service monitor object
+ serviceMonitorAdditionalLabels: {}
+ # -- Set a custom namespace where to deploy ServiceMonitor resource
+ serviceMonitorNamespace:
+ # -- How often prometheus should scrape metrics
+ scrapeInterval: 60s
+
+ # Defaults to the default endpoint used by the prometheus plugin
+ # -- The endpoint prometheus should get metrics from
+ scrapeEndpoint: /prometheus
+
+ # See here: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
+ # The `groups` root object is added by default, add the rule entries
+ # -- Array of prometheus alerting rules
+ alertingrules: []
+ # -- Additional labels to add to the PrometheusRule object
+ alertingRulesAdditionalLabels: {}
+ # -- Set a custom namespace where to deploy PrometheusRule resource
+ prometheusRuleNamespace: ""
+
+ # RelabelConfigs to apply to samples before scraping. Prometheus Operator automatically adds
+ # relabelings for a few standard Kubernetes fields. The original scrape job’s name
+ # is available via the __tmp_prometheus_job_name label.
+ # More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ relabelings: []
+ # MetricRelabelConfigs to apply to samples before ingestion.
+ metricRelabelings: []
+
+ googlePodMonitor:
+ # If enabled, it creates a Google Managed Prometheus scraping config
+ enabled: false
+ # Set a custom namespace where to deploy PodMonitoring resource
+ # serviceMonitorNamespace: ""
+ scrapeInterval: 60s
+ # This is the default endpoint used by the prometheus plugin
+ scrapeEndpoint: /prometheus
+
+ # -- Can be used to disable rendering controller test resources when using helm template
+ testEnabled: true
+
+ httpsKeyStore:
+ # -- Enables HTTPS keystore on jenkins controller
+ enable: false
+ # -- Name of the secret that already has ssl keystore
+ jenkinsHttpsJksSecretName: ""
+ # -- Name of the key in the secret that already has ssl keystore
+ jenkinsHttpsJksSecretKey: "jenkins-jks-file"
+ # -- Name of the secret that contains the JKS password, if it is not in the same secret as the JKS file
+ jenkinsHttpsJksPasswordSecretName: ""
+ # -- Name of the key in the secret that contains the JKS password
+ jenkinsHttpsJksPasswordSecretKey: "https-jks-password"
+ disableSecretMount: false
+
+ # When HTTPS keystore is enabled, servicePort and targetPort will be used as HTTPS port
+ # -- HTTP port that Jenkins should listen to along with HTTPS; it also serves as the liveness and readiness probe port.
+ httpPort: 8081
+ # -- Path of HTTPS keystore file
+ path: "/var/jenkins_keystore"
+ # -- Jenkins keystore filename which will appear under controller.httpsKeyStore.path
+ fileName: "keystore.jks"
+ # -- Jenkins keystore password
+ password: "password"
+
+ # -- Base64 encoded Keystore content. Keystore must be converted to base64 then being pasted here
+ jenkinsKeyStoreBase64Encoded:
+ # Convert keystore.jks files content to base64 > $ cat keystore.jks | base64
+# /u3+7QAAAAIAAAABAAAAAQANamVua2luc2NpLmNvbQAAAW2r/b1ZAAAFATCCBP0wDgYKKwYBBAEq
+# AhEBAQUABIIE6QbCqasvoHS0pSwYqSvdydMCB9t+VNfwhFIiiuAelJfO5sSe2SebJbtwHgLcRz1Z
+# gMtWgOSFdl3bWSzA7vrW2LED52h+jXLYSWvZzuDuh8hYO85m10ikF6QR+dTi4jra0whIFDvq3pxe
+# TnESxEsN+DvbZM3jA3qsjQJSeISNpDjO099dqQvHpnCn18lyk7J4TWJ8sOQQb1EM2zDAfAOSqA/x
+# QuPEFl74DlY+5DIk6EBvpmWhaMSvXzWZACGA0sYqa157dq7O0AqmuLG/EI5EkHETO4CrtBW+yLcy
+# 2dUCXOMA+j+NjM1BjrQkYE5vtSfNO6lFZcISyKo5pTFlcA7ut0Fx2nZ8GhHTn32CpeWwNcZBn1gR
+# pZVt6DxVVkhTAkMLhR4rL2wGIi/1WRs23ZOLGKtyDNvDHnQyDiQEoJGy9nAthA8aNHa3cfdF10vB
+# Drb19vtpFHmpvKEEhpk2EBRF4fTi644Fuhu2Ied6118AlaPvEea+n6G4vBz+8RWuVCmZjLU+7h8l
+# Hy3/WdUPoIL5eW7Kz+hS+sRTFzfu9C48dMkQH3a6f3wSY+mufizNF9U298r98TnYy+PfDJK0bstG
+# Ph6yPWx8DGXKQBwrhWJWXI6JwZDeC5Ny+l8p1SypTmAjpIaSW3ge+KgcL6Wtt1R5hUV1ajVwVSUi
+# HF/FachKqPqyLJFZTGjNrxnmNYpt8P1d5JTvJfmfr55Su/P9n7kcyWp7zMcb2Q5nlXt4tWogOHLI
+# OzEWKCacbFfVHE+PpdrcvCVZMDzFogIq5EqGTOZe2poPpBVE+1y9mf5+TXBegy5HToLWvmfmJNTO
+# NCDuBjgLs2tdw2yMPm4YEr57PnMX5gGTC3f2ZihXCIJDCRCdQ9sVBOjIQbOCzxFXkVITo0BAZhCi
+# Yz61wt3Ud8e//zhXWCkCsSV+IZCxxPzhEFd+RFVjW0Nm9hsb2FgAhkXCjsGROgoleYgaZJWvQaAg
+# UyBzMmKDPKTllBHyE3Gy1ehBNGPgEBChf17/9M+j8pcm1OmlM434ctWQ4qW7RU56//yq1soFY0Te
+# fu2ei03a6m68fYuW6s7XEEK58QisJWRAvEbpwu/eyqfs7PsQ+zSgJHyk2rO95IxdMtEESb2GRuoi
+# Bs+AHNdYFTAi+GBWw9dvEgqQ0Mpv0//6bBE/Fb4d7b7f56uUNnnE7mFnjGmGQN+MvC62pfwfvJTT
+# EkT1iZ9kjM9FprTFWXT4UmO3XTvesGeE50sV9YPm71X4DCQwc4KE8vyuwj0s6oMNAUACW2ClU9QQ
+# y0tRpaF1tzs4N42Q5zl0TzWxbCCjAtC3u6xf+c8MCGrr7DzNhm42LOQiHTa4MwX4x96q7235oiAU
+# iQqSI/hyF5yLpWw4etyUvsx2/0/0wkuTU1FozbLoCWJEWcPS7QadMrRRISxHf0YobIeQyz34regl
+# t1qSQ3dCU9D6AHLgX6kqllx4X0fnFq7LtfN7fA2itW26v+kAT2QFZ3qZhINGfofCja/pITC1uNAZ
+# gsJaTMcQ600krj/ynoxnjT+n1gmeqThac6/Mi3YlVeRtaxI2InL82ZuD+w/dfY9OpPssQjy3xiQa
+# jPuaMWXRxz/sS9syOoGVH7XBwKrWpQcpchozWJt40QV5DslJkclcr8aC2AGlzuJMTdEgz1eqV0+H
+# bAXG9HRHN/0eJTn1/QAAAAEABVguNTA5AAADjzCCA4swggJzAhRGqVxH4HTLYPGO4rzHcCPeGDKn
+# xTANBgkqhkiG9w0BAQsFADCBgTELMAkGA1UEBhMCY2ExEDAOBgNVBAgMB29udGFyaW8xEDAOBgNV
+# BAcMB3Rvcm9udG8xFDASBgNVBAoMC2plbmtpbnN0ZXN0MRkwFwYDVQQDDBBqZW5raW5zdGVzdC5p
+# bmZvMR0wGwYJKoZIhvcNAQkBFg50ZXN0QHRlc3QuaW5mbzAeFw0xOTEwMDgxNTI5NTVaFw0xOTEx
+# MDcxNTI5NTVaMIGBMQswCQYDVQQGEwJjYTEQMA4GA1UECAwHb250YXJpbzEQMA4GA1UEBwwHdG9y
+# b250bzEUMBIGA1UECgwLamVua2luc3Rlc3QxGTAXBgNVBAMMEGplbmtpbnN0ZXN0LmluZm8xHTAb
+# BgkqhkiG9w0BCQEWDnRlc3RAdGVzdC5pbmZvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+# AQEA02q352JTHGvROMBhSHvSv+vnoOTDKSTz2aLQn0tYrIRqRo+8bfmMjXuhkwZPSnCpvUGNAJ+w
+# Jrt/dqMoYUjCBkjylD/qHmnXN5EwS1cMg1Djh65gi5JJLFJ7eNcoSsr/0AJ+TweIal1jJSP3t3PF
+# 9Uv21gm6xdm7HnNK66WpUUXLDTKaIs/jtagVY1bLOo9oEVeLN4nT2CYWztpMvdCyEDUzgEdDbmrP
+# F5nKUPK5hrFqo1Dc5rUI4ZshL3Lpv398aMxv6n2adQvuL++URMEbXXBhxOrT6rCtYzbcR5fkwS9i
+# d3Br45CoWOQro02JAepoU0MQKY5+xQ4Bq9Q7tB9BAwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAe
+# 4xc+mSvKkrKBHg9/zpkWgZUiOp4ENJCi8H4tea/PCM439v6y/kfjT/okOokFvX8N5aa1OSz2Vsrl
+# m8kjIc6hiA7bKzT6lb0EyjUShFFZ5jmGVP4S7/hviDvgB5yEQxOPpumkdRP513YnEGj/o9Pazi5h
+# /MwpRxxazoda9r45kqQpyG+XoM4pB+Fd3JzMc4FUGxfVPxJU4jLawnJJiZ3vqiSyaB0YyUL+Er1Q
+# 6NnqtR4gEBF0ZVlQmkycFvD4EC2boP943dLqNUvop+4R3SM1QMM6P5u8iTXtHd/VN4MwMyy1wtog
+# hYAzODo1Jt59pcqqKJEas0C/lFJEB3frw4ImNx5fNlJYOpx+ijfQs9m39CevDq0=
+
+agent:
+ # -- Enable Kubernetes plugin jnlp-agent podTemplate
+ enabled: true
+ # -- The name of the pod template to use for providing default values
+ defaultsProviderTemplate: ""
+
+ # For connecting to the Jenkins controller
+ # -- Overrides the Kubernetes Jenkins URL
+ jenkinsUrl:
+
+ # connects to the specified host and port, instead of connecting directly to the Jenkins controller
+ # -- Overrides the Kubernetes Jenkins tunnel
+ jenkinsTunnel:
+ # -- The connection timeout in seconds for connections to Kubernetes API. The minimum value is 5
+ kubernetesConnectTimeout: 5
+ # -- The read timeout in seconds for connections to Kubernetes API. The minimum value is 15
+ kubernetesReadTimeout: 15
+ # -- The maximum concurrent connections to Kubernetes API
+ maxRequestsPerHostStr: "32"
+ # -- Time in minutes after which the Kubernetes cloud plugin will clean up an idle worker that has not already terminated
+ retentionTimeout: 5
+ # -- Seconds to wait for pod to be running
+ waitForPodSec: 600
+ # -- Namespace in which the Kubernetes agents should be launched
+ namespace:
+ # -- Custom Pod labels (an object with `label-key: label-value` pairs)
+ podLabels: {}
+ # -- Custom registry used to pull the agent jnlp image from
+ jnlpregistry:
+ image:
+ # -- Repository to pull the agent jnlp image from
+ repository: "jenkins/inbound-agent"
+ # -- Tag of the image to pull
+ tag: "3206.vb_15dcf73f6a_9-3"
+ # -- Configure working directory for default agent
+ workingDir: "/home/jenkins/agent"
+ nodeUsageMode: "NORMAL"
+ # -- Append Jenkins labels to the agent
+ customJenkinsLabels: []
+ # -- Name of the secret to be used to pull the image
+ imagePullSecretName:
+ componentName: "jenkins-agent"
+ # -- Enables agent communication via websockets
+ websocket: false
+ directConnection: false
+ # -- Agent privileged container
+ privileged: false
+ # -- Configure container user
+ runAsUser:
+ # -- Configure container group
+ runAsGroup:
+ # -- Enables the agent to use the host network
+ hostNetworking: false
+ # -- Resources allocation (Requests and Limits)
+ resources:
+ requests:
+ cpu: "512m"
+ memory: "512Mi"
+ # ephemeralStorage:
+ limits:
+ cpu: "512m"
+ memory: "512Mi"
+ # ephemeralStorage:
+ livenessProbe: {}
+# execArgs: "cat /tmp/healthy"
+# failureThreshold: 3
+# initialDelaySeconds: 0
+# periodSeconds: 10
+# successThreshold: 1
+# timeoutSeconds: 1
+
+ # You may want to change this to true while testing a new image
+ # -- Always pull agent container image before build
+ alwaysPullImage: false
+ # When using Pod Security Admission in the Agents namespace with the restricted Pod Security Standard,
+ # the jnlp container cannot be scheduled without overriding its container definition with a securityContext.
+ # This option allows to automatically inject in the jnlp container a securityContext
+ # that is suitable for the use of the restricted Pod Security Standard.
+ # -- Set a restricted securityContext on jnlp containers
+ restrictedPssSecurityContext: false
+ # Controls how agent pods are retained after the Jenkins build completes
+ # Possible values: Always, Never, OnFailure
+ podRetention: "Never"
+ # Disable if you do not want the YAML of the agent pod template to show up
+ # in the job Console Output. This can be helpful for either security reasons
+ # or simply to clean up the output to make it easier to read.
+ showRawYaml: true
+
+ # You can define the volumes that you want to mount for this container
+ # Allowed types are: ConfigMap, EmptyDir, EphemeralVolume, HostPath, Nfs, PVC, Secret
+ # Configure the attributes as they appear in the corresponding Java class for that type
+ # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes
+ # -- Additional volumes
+ volumes: []
+ # - type: ConfigMap
+ # configMapName: myconfigmap
+ # mountPath: /var/myapp/myconfigmap
+ # - type: EmptyDir
+ # mountPath: /var/myapp/myemptydir
+ # memory: false
+ # - type: EphemeralVolume
+ # mountPath: /var/myapp/myephemeralvolume
+ # accessModes: ReadWriteOnce
+ # requestsSize: 10Gi
+ # storageClassName: mystorageclass
+ # - type: HostPath
+ # hostPath: /var/lib/containers
+ # mountPath: /var/myapp/myhostpath
+ # - type: Nfs
+ # mountPath: /var/myapp/mynfs
+ # readOnly: false
+ # serverAddress: "192.0.2.0"
+ # serverPath: /var/lib/containers
+ # - type: PVC
+ # claimName: mypvc
+ # mountPath: /var/myapp/mypvc
+ # readOnly: false
+ # - type: Secret
+ # defaultMode: "600"
+ # mountPath: /var/myapp/mysecret
+ # secretName: mysecret
+ # Pod-wide environment, these vars are visible to any container in the agent pod
+
+ # You can define the workspaceVolume that you want to mount for this container
+ # Allowed types are: DynamicPVC, EmptyDir, EphemeralVolume, HostPath, Nfs, PVC
+ # Configure the attributes as they appear in the corresponding Java class for that type
+ # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes/workspace
+ # -- Workspace volume (defaults to EmptyDir)
+ workspaceVolume: {}
+ ## DynamicPVC example
+ # - type: DynamicPVC
+ # configMapName: myconfigmap
+ ## EmptyDir example
+ # - type: EmptyDir
+ # memory: false
+ ## EphemeralVolume example
+ # - type: EphemeralVolume
+ # accessModes: ReadWriteOnce
+ # requestsSize: 10Gi
+ # storageClassName: mystorageclass
+ ## HostPath example
+ # - type: HostPath
+ # hostPath: /var/lib/containers
+ ## NFS example
+ # - type: Nfs
+ # readOnly: false
+ # serverAddress: "192.0.2.0"
+ # serverPath: /var/lib/containers
+ ## PVC example
+ # - type: PVC
+ # claimName: mypvc
+ # readOnly: false
+
+ # Pod-wide environment, these vars are visible to any container in the agent pod
+ # -- Environment variables for the agent Pod
+ envVars: []
+ # - name: PATH
+ # value: /usr/local/bin
+ # -- Mount a secret as environment variable
+ secretEnvVars: []
+ # - key: PATH
+ # optional: false # default: false
+ # secretKey: MY-K8S-PATH
+ # secretName: my-k8s-secret
+
+ # -- Node labels for pod assignment
+ nodeSelector: {}
+ # Key Value selectors. Ex:
+ # nodeSelector:
+ # jenkins-agent: v1
+
+ # -- Command to execute when side container starts
+ command:
+ # -- Arguments passed to command to execute
+ args: "${computer.jnlpmac} ${computer.name}"
+ # -- Side container name
+ sideContainerName: "jnlp"
+
+ # Doesn't allocate pseudo TTY by default
+ # -- Allocate pseudo tty to the side container
+ TTYEnabled: false
+ # -- Max number of agents to launch
+ containerCap: 10
+ # -- Agent Pod base name
+ podName: "default"
+
+ # -- Allows the Pod to remain active for reuse until the configured number of minutes has passed since the last step was executed on it
+ idleMinutes: 0
+
+
+ # The raw yaml of a Pod API Object, for example, this allows usage of toleration for agent pods.
+ # https://github.com/jenkinsci/kubernetes-plugin#using-yaml-to-define-pod-templates
+ # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ # -- The raw yaml of a Pod API Object to merge into the agent spec
+ yamlTemplate: ""
+ # yamlTemplate: |-
+ # apiVersion: v1
+ # kind: Pod
+ # spec:
+ # tolerations:
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+
+ # -- Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates. Possible values: "merge" or "override"
+ yamlMergeStrategy: "override"
+ # -- Timeout in seconds for an agent to be online
+ connectTimeout: 100
+ # -- Annotations to apply to the pod
+ annotations: {}
+
+ # Containers specified here are added to all agents. Set key empty to remove container from additional agents.
+ # -- Add additional containers to the agents
+ additionalContainers: []
+ # - sideContainerName: dind
+ # image:
+ # repository: docker
+ # tag: dind
+ # command: dockerd-entrypoint.sh
+ # args: ""
+ # privileged: true
+ # resources:
+ # requests:
+ # cpu: 500m
+ # memory: 1Gi
+ # limits:
+ # cpu: 1
+ # memory: 2Gi
+
+ # Useful when configuring agents only with the podTemplates value, since the default podTemplate populated by values mentioned above will be excluded in the rendered template.
+ # -- Disable the default Jenkins Agent configuration
+ disableDefaultAgent: false
+
+ # Below is the implementation of custom pod templates for the default configured kubernetes cloud.
+ # Add a key under podTemplates for each pod template. Each key (prior to | character) is just a label, and can be any value.
+ # Keys are only used to give the pod template a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label
+ # characters: lowercase letters, numbers, and hyphens. Each pod template can contain multiple containers.
+ # For this pod templates configuration to be loaded, the following values must be set:
+ # controller.JCasC.defaultConfig: true
+ # Best reference is https://<jenkins_url>/configuration-as-code/reference#Cloud-kubernetes. The example below creates a python pod template.
+ # -- Configures extra pod templates for the default kubernetes cloud
+ podTemplates: {}
+ # python: |
+ # - name: python
+ # label: jenkins-python
+ # serviceAccount: jenkins
+ # containers:
+ # - name: python
+ # image: python:3
+ # command: "/bin/sh -c"
+ # args: "cat"
+ # ttyEnabled: true
+ # privileged: true
+ # resourceRequestCpu: "400m"
+ # resourceRequestMemory: "512Mi"
+ # resourceLimitCpu: "1"
+ # resourceLimitMemory: "1024Mi"
+
+# Inherits all values from `agent` so you only need to specify values which differ
+# -- Configure additional agents
+additionalAgents: {}
+# maven:
+# podName: maven
+# customJenkinsLabels: maven
+# # An example of overriding the jnlp container
+# # sideContainerName: jnlp
+# image:
+# repository: jenkins/jnlp-agent-maven
+# tag: latest
+# python:
+# podName: python
+# customJenkinsLabels: python
+# sideContainerName: python
+# image:
+# repository: python
+# tag: "3"
+# command: "/bin/sh -c"
+# args: "cat"
+# TTYEnabled: true
+
+# Here you can add additional clouds
+# They inherit all values from the default cloud (including the main agent), so
+# you only need to specify values which differ. If you want to override
+# default additionalAgents with additionalClouds.additionalAgents, set
+# additionalAgentsOverride to `true`.
+additionalClouds: {}
+# remote-cloud-1:
+# kubernetesURL: https://api.remote-cloud.com
+# additionalAgentsOverride: true
+# additionalAgents:
+# maven-2:
+# podName: maven-2
+# customJenkinsLabels: maven
+# # An example of overriding the jnlp container
+# # sideContainerName: jnlp
+# image:
+# repository: jenkins/jnlp-agent-maven
+# tag: latest
+# namespace: my-other-maven-namespace
+# remote-cloud-2:
+# kubernetesURL: https://api.remote-cloud.com
+
+persistence:
+ # -- Enable the use of a Jenkins PVC
+ enabled: true
+
+ # A manually managed Persistent Volume and Claim
+ # Requires persistence.enabled: true
+ # If defined, PVC must be created manually before volume will be bound
+ # -- Provide the name of a PVC
+ existingClaim:
+
+ # jenkins data Persistent Volume Storage Class
+ # If defined, storageClassName: <storageClass>
+ # If set to "-", storageClassName: "", which disables dynamic provisioning
+ # If undefined (the default) or set to null, no storageClassName spec is
+ # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS & OpenStack)
+ # -- Storage class for the PVC
+ storageClass:
+ # -- Annotations for the PVC
+ annotations: {}
+ # -- Labels for the PVC
+ labels: {}
+ # -- The PVC access mode
+ accessMode: "ReadWriteOnce"
+ # -- The size of the PVC
+ size: "8Gi"
+
+ # ref: https://kubernetes.io/docs/concepts/storage/volume-pvc-datasource/
+ # -- Existing data source to clone PVC from
+ dataSource: {}
+ # name: PVC-NAME
+ # kind: PersistentVolumeClaim
+
+ # -- SubPath for jenkins-home mount
+ subPath:
+ # -- Additional volumes
+ volumes: []
+ # - name: nothing
+ # emptyDir: {}
+
+ # -- Additional mounts
+ mounts: []
+ # - mountPath: /var/nothing
+ # name: nothing
+ # readOnly: true
+
+networkPolicy:
+ # -- Enable the creation of NetworkPolicy resources
+ enabled: false
+
+ # For Kubernetes v1.4, v1.5 and v1.6, use 'extensions/v1beta1'
+ # For Kubernetes v1.7, use 'networking.k8s.io/v1'
+ # -- NetworkPolicy ApiVersion
+ apiVersion: networking.k8s.io/v1
+ # You can allow agents to connect from both within the cluster (from within specific/all namespaces) AND/OR from a given external IP range
+ internalAgents:
+ # -- Allow internal agents (from the same cluster) to connect to controller. Agent pods will be filtered based on PodLabels
+ allowed: true
+ # -- A map of labels (keys/values) that agent pods must have to be able to connect to controller
+ podLabels: {}
+ # -- A map of labels (keys/values) that agents namespaces must have to be able to connect to controller
+ namespaceLabels: {}
+ # project: myproject
+ externalAgents:
+ # -- The IP range from which external agents are allowed to connect to controller, i.e., 172.17.0.0/16
+ ipCIDR:
+ # -- A list of IP sub-ranges to be excluded from the allowlisted IP range
+ except: []
+ # - 172.17.1.0/24
+
+## Install Default RBAC roles and bindings
+rbac:
+ # -- Whether RBAC resources are created
+ create: true
+ # -- Whether the Jenkins service account should be able to read Kubernetes secrets
+ readSecrets: false
+
+serviceAccount:
+ # -- Configures if a ServiceAccount with this name should be created
+ create: true
+
+ # The name of the ServiceAccount is autogenerated by default
+ # -- The name of the ServiceAccount to be used by access-controlled resources
+ name:
+ # -- Configures annotations for the ServiceAccount
+ annotations: {}
+ # -- Configures extra labels for the ServiceAccount
+ extraLabels: {}
+ # -- Controller ServiceAccount image pull secret
+ imagePullSecretName:
+
+
+serviceAccountAgent:
+ # -- Configures if an agent ServiceAccount should be created
+ create: false
+
+ # If not set and create is true, a name is generated using the fullname template
+ # -- The name of the agent ServiceAccount to be used by access-controlled resources
+ name:
+ # -- Configures annotations for the agent ServiceAccount
+ annotations: {}
+ # -- Configures extra labels for the agent ServiceAccount
+ extraLabels: {}
+ # -- Agent ServiceAccount image pull secret
+ imagePullSecretName:
+
+# -- Checks if any deprecated values are used
+checkDeprecation: true
+
+awsSecurityGroupPolicies:
+ enabled: false
+ policies:
+ - name: ""
+ securityGroupIds: []
+ podSelector: {}
+
+# Here you can configure unit tests values when executing the helm unittest in the CONTRIBUTING.md
+helmtest:
+ # A testing framework for bash
+ bats:
+ # Bash Automated Testing System (BATS)
+ image:
+ # -- Registry of the image used to test the framework
+ registry: "docker.io"
+ # -- Repository of the image used to test the framework
+ repository: "bats/bats"
+ # -- Tag of the image to test the framework
+ tag: "1.11.0"
diff --git a/charts/k8s-gerrit/.github/PULL_REQUEST_TEMPLATE.md b/charts/k8s-gerrit/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..fd8a1af
--- /dev/null
+++ b/charts/k8s-gerrit/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,9 @@
+# Important Notice
+
+Patch submission and review is done through
+[Gerrit Code Review](https://gerrit-review.googlesource.com).
+Unfortunately we cannot pull your code as a Pull Request.
+
+__NO REVIEWS OR DISCUSSIONS will happen on GitHub__, all the
+[code collaboration](../Documentation/developer-guide.md)
+will take place on Gerrit.
diff --git a/charts/k8s-gerrit/.gitignore b/charts/k8s-gerrit/.gitignore
new file mode 100644
index 0000000..6beacc0
--- /dev/null
+++ b/charts/k8s-gerrit/.gitignore
@@ -0,0 +1,14 @@
+*.pem
+*.crt
+/helm-charts/*/charts
+/helm-charts/*/requirements.lock
+
+__pycache__
+.pytest_cache
+*.pyc
+
+*bin/
+.DS_Store
+.vscode/
+
+.project
\ No newline at end of file
diff --git a/charts/k8s-gerrit/.mailmap b/charts/k8s-gerrit/.mailmap
new file mode 100644
index 0000000..cc103a4
--- /dev/null
+++ b/charts/k8s-gerrit/.mailmap
@@ -0,0 +1,2 @@
+Matthias Sohn <matthias.sohn@sap.com> <matthias.sohn@gmail.com>
+Thomas Draebing <thomas.draebing@sap.com>
diff --git a/charts/k8s-gerrit/.pylintrc b/charts/k8s-gerrit/.pylintrc
new file mode 100644
index 0000000..2e74428
--- /dev/null
+++ b/charts/k8s-gerrit/.pylintrc
@@ -0,0 +1,15 @@
+[MESSAGES CONTROL]
+disable=C0111, W0621, R0201, R0913, R0903, W0511, C0330
+
+[BASIC]
+no-docstring-rgx=(test_.*)|(__.*__)
+
+[FORMAT]
+indent-string=' '
+good-names=i,f
+
+[SIMILARITIES]
+min-similarity-lines=6
+
+[MASTER]
+init-hook='import sys; sys.path.append("./tests/helpers")'
diff --git a/charts/k8s-gerrit/Documentation/developer-guide.md b/charts/k8s-gerrit/Documentation/developer-guide.md
new file mode 100644
index 0000000..0e5fc5f
--- /dev/null
+++ b/charts/k8s-gerrit/Documentation/developer-guide.md
@@ -0,0 +1,83 @@
+# Developer Guide
+
+[TOC]
+
+## Code Review
+
+This project uses Gerrit for code review:
+https://gerrit-review.googlesource.com/
+which uses the ["git push" workflow][1] with server
+https://gerrit.googlesource.com/k8s-gerrit. You will need a
+[generated cookie][2].
+
+Gerrit depends on "Change-Id" annotations in your commit message.
+If you try to push a commit without one, it will explain how to
+install the proper git-hook:
+
+```
+curl -Lo `git rev-parse --git-dir`/hooks/commit-msg \
+ https://gerrit-review.googlesource.com/tools/hooks/commit-msg
+chmod +x `git rev-parse --git-dir`/hooks/commit-msg
+```
+
+Before you create your local commit (which you'll push to Gerrit)
+you will need to set your email to match your Gerrit account:
+
+```
+git config --local --add user.email foo@bar.com
+```
+
+Normally you will create code reviews by pushing for master:
+
+```
+git push origin HEAD:refs/for/master
+```
+
+## Developing container images
+
+When changing or creating container images, keep the image size as small as
+possible. This reduces storage space needed for images, the upload time and most
+importantly the download time, which improves startup time of pods.
+
+Some good practices are listed here:
+
+- **Chain commands:** Each `RUN`-command creates a new layer in the docker image.
+Each layer increases the total image size. Thus, reducing the number of layers,
+can also reduce the image size.
+
+- **Clean up after package installation:** The package installation creates a
+number of cache files, which should be removed after installation. In Ubuntu/Debian-
+based images use the following snippet (This requires `apt-get update` before
+each package installation!):
+
+```docker
+RUN apt-get update && \
+ apt-get install -y <packages> && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+```
+
+In Alpine based images use the `--no-cache`-flag of `apk`.
+
+- **Clean up temporary files immediately:** If temporary files are created by a
+command remove them in the same command chain.
+
+- **Use multi stage builds:** If some complicated build processes are needed for
+building parts of the container image, of which only the final product is needed,
+use [multi stage builds][3]
+
+
+[1]: https://gerrit-review.googlesource.com/Documentation/user-upload.html#_git_push
+[2]: https://gerrit.googlesource.com/new-password
+[3]: https://docs.docker.com/develop/develop-images/multistage-build/
+
+## Writing clean python code
+
+When writing python code, either for tests or for scripts, use `black` and `pylint`
+to ensure a clean code style. They can be run by the following commands:
+
+```sh
+pipenv install --dev
+pipenv run black $(find . -name '*.py')
+pipenv run pylint $(find . -name '*.py')
+```
diff --git a/charts/k8s-gerrit/Documentation/istio.md b/charts/k8s-gerrit/Documentation/istio.md
new file mode 100644
index 0000000..1803b32
--- /dev/null
+++ b/charts/k8s-gerrit/Documentation/istio.md
@@ -0,0 +1,40 @@
+# Istio
+
+Istio provides an alternative way to control ingress traffic into the cluster.
+In addition, it allows to finetune the traffic inside the cluster and provides
+a huge repertoire of load balancing and routing mechanisms.
+
+***note
+Currently, only the Gerrit replica chart allows using istio out of the box.
+***
+
+## Install istio
+
+An example configuration based on the default profile provided by istio can be
+found under `./istio/src/`. Some values will have to be adapted to the respective
+system. These are marked by comments tagged with `TO_BE_CHANGED`.
+To install istio with this configuration, run:
+
+```sh
+kubectl apply -f istio/istio-system-namespace.yaml
+istioctl install -f istio/gerrit.profile.yaml
+```
+
+To install Gerrit using istio for networking, the namespace running Gerrit has to
+be configured to enable sidecar injection, by setting the `istio-injection: enabled`
+label. An example for such a namespace can be found at `./istio/namespace.yaml`.
+
+## Uninstall istio
+
+To uninstall istio, run:
+
+```sh
+istioctl uninstall -f istio/gerrit.profile.yaml
+```
+
+## Restricting access to a list of allowed IPs
+
+In development setups, it might be wanted to allow access to the setup only from
+specified IPs. This can be done by patching the `spec.loadBalancerSourceRanges`
+value of the service used for the IngressGateway. A patch doing that can be
+uncommented in `istio/gerrit.profile.yaml`.
diff --git a/charts/k8s-gerrit/Documentation/minikube.md b/charts/k8s-gerrit/Documentation/minikube.md
new file mode 100644
index 0000000..18af7cc
--- /dev/null
+++ b/charts/k8s-gerrit/Documentation/minikube.md
@@ -0,0 +1,207 @@
+# Running Gerrit on Kubernetes using Minikube
+
+To test Gerrit on Kubernetes locally, a one-node cluster can be set up using
+Minikube. Minikube provides basic Kubernetes functionality and allows to quickly
+deploy and evaluate a Kubernetes deployment.
+This tutorial will guide through setting up Minikube to deploy the gerrit and
+gerrit-replica helm charts to it. Note, that due to limited compute
+resources on a single local machine and the restricted functionality of Minikube,
+the full functionality of the charts might not be usable.
+
+## Installing Kubectl and Minikube
+
+To use Minikube, a hypervisor is needed. A good non-commercial solution is HyperKit.
+The Minikube project provides binaries to install the driver:
+
+```sh
+curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-hyperkit \
+ && sudo install -o root -g wheel -m 4755 docker-machine-driver-hyperkit /usr/local/bin/
+```
+
+To manage Kubernetes clusters, the Kubectl CLI tool will be needed. A detailed
+guide how to do that for all supported OSs can be found
+[here](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos).
+On OSX homebrew can be used for installation:
+
+```sh
+brew install kubernetes-cli
+```
+
+Finally, Minikube can be installed. Download the latest binary
+[here](https://github.com/kubernetes/minikube/releases). To install it on OSX, run:
+
+```sh
+VERSION=1.1.0
+curl -Lo minikube https://storage.googleapis.com/minikube/releases/v$VERSION/minikube-darwin-amd64 && \
+ chmod +x minikube && \
+ sudo cp minikube /usr/local/bin/ && \
+ rm minikube
+```
+
+## Starting a Minikube cluster
+
+For a more detailed overview over the features of Minikube refer to the
+[official documentation](https://kubernetes.io/docs/setup/minikube/). If a
+hypervisor driver other than VirtualBox (e.g. hyperkit) is used, set the
+`--vm-driver` option accordingly:
+
+```sh
+minikube config set vm-driver hyperkit
+```
+
+The gerrit and gerrit-replica charts are configured to work with the default
+resource limits configured for minikube (2 cpus and 2Gi RAM). If more resources
+are desired (e.g. to speed up deployment startup or for more resource intensive
+tests), configure the resource limits using:
+
+```sh
+minikube config set memory 4096
+minikube config set cpus 4
+```
+
+To install a full Gerrit and Gerrit replica setup with reasonable startup
+times, Minikube will need about 9.5 GB of RAM and 3-4 CPUs! But the more the
+better.
+
+To start a Minikube cluster simply run:
+
+```sh
+minikube start
+```
+
+Starting up the cluster will take a while. The installation should automatically
+configure kubectl to connect to the Minikube cluster. Run the following command
+to test whether the cluster is up:
+
+```sh
+kubectl get nodes
+
+NAME STATUS ROLES AGE VERSION
+minikube Ready master 1h v1.14.2
+```
+
+The helm-charts use ingresses, which can be used in Minikube by enabling the
+ingress addon:
+
+```sh
+minikube addons enable ingress
+```
+
+Since for testing there will probably be no usable host names configured to point
+to the minikube installation, the traffic to the hostnames configured in the
+Ingress definition needs to be redirected to Minikube by editing the `/etc/hosts`-
+file, adding a line containing the Minikube IP and a whitespace-delimited list
+of all the hostnames:
+
+```sh
+echo "$(minikube ip) primary.gerrit backend.gerrit replica.gerrit" | sudo tee -a /etc/hosts
+```
+
+The host names (e.g. `primary.gerrit`) are the defaults, when using the values.yaml
+files provided as an example for minikube. Change them accordingly, if a different
+one is chosen.
+This will only redirect traffic from the computer running Minikube.
+
+To see whether all cluster components are ready, run:
+
+```sh
+kubectl get pods --all-namespaces
+```
+
+The status of all components should be `Ready`. The kubernetes dashboard giving
+an overview over all cluster components, can be opened by executing:
+
+```sh
+minikube dashboard
+```
+
+## Install helm
+
+Helm is needed to install and manage the helm charts. To install the helm client
+on your local machine (running OSX), run:
+
+```sh
+brew install kubernetes-helm
+```
+
+A guide for all supported OSs can be found [here](https://docs.helm.sh/using_helm/#installing-helm).
+
+## Start an NFS-server
+
+The helm-charts need a volume with ReadWriteMany access mode to store
+git-repositories. This guide will use the nfs-server-provisioner chart to provide
+NFS-volumes directly in the cluster. A basic configuration file for the nfs-server-
+provisioner-chart is provided in the supplements-directory. It can be installed
+by running:
+
+```sh
+helm install nfs \
+ stable/nfs-server-provisioner \
+ -f ./supplements/nfs.minikube.values.yaml
+```
+
+## Installing the gerrit helm chart
+
+A configuration file to configure the gerrit chart is provided at
+`./supplements/gerrit.minikube.values.yaml`. To install the gerrit
+chart on Minikube, run:
+
+```sh
+helm install gerrit \
+ ./helm-charts/gerrit \
+ -f ./supplements/gerrit.minikube.values.yaml
+```
+
+Startup may take some time, especially when allowing only a small amount of
+resources to the containers. Check progress with `kubectl get pods -w` until
+it says that the pod `gerrit-gerrit-stateful-set-0` is `Running`.
+Then use `kubectl logs -f gerrit-gerrit-stateful-set-0` to follow
+the startup process of Gerrit until a line like this shows that Gerrit is ready:
+
+```sh
+[2019-06-04 15:24:25,914] [main] INFO com.google.gerrit.pgm.Daemon : Gerrit Code Review 2.16.8-86-ga831ebe687 ready
+```
+
+To open Gerrit's UI, run:
+
+```sh
+open http://primary.gerrit
+```
+
+## Installing the gerrit-replica helm chart
+
+A custom configuration file to configure the gerrit-replica chart is provided at
+`./supplements/gerrit-replica.minikube.values.yaml`. Install it by running:
+
+```sh
+helm install gerrit-replica \
+ ./helm-charts/gerrit-replica \
+ -f ./supplements/gerrit-replica.minikube.values.yaml
+```
+
+The replica will start up, which can be followed by running:
+
+```sh
+kubectl logs -f gerrit-replica-gerrit-replica-deployment-<id>
+```
+
+Replication of repositories has to be started on the Gerrit primary, e.g. by making
+a change in the respective repositories. Only then previous changes to the
+repositories will be available on the replica.
+
+## Cleanup
+
+Shut down minikube:
+
+```sh
+minikube stop
+```
+
+Delete the minikube cluster:
+
+```sh
+minikube delete
+```
+
+Remove the line added to `/etc/hosts`. If Minikube is restarted, the cluster will
+get a new IP and the `/etc/hosts`-entry has to be adjusted.
diff --git a/charts/k8s-gerrit/Documentation/operator-api-reference.md b/charts/k8s-gerrit/Documentation/operator-api-reference.md
new file mode 100644
index 0000000..5d03cf5
--- /dev/null
+++ b/charts/k8s-gerrit/Documentation/operator-api-reference.md
@@ -0,0 +1,1160 @@
+# Gerrit Operator - API Reference
+
+- [Gerrit Operator - API Reference](#gerrit-operator---api-reference)
+ - [General Remarks](#general-remarks)
+ - [Inheritance](#inheritance)
+ - [GerritCluster](#gerritcluster)
+ - [Gerrit](#gerrit)
+ - [Receiver](#receiver)
+ - [GitGarbageCollection](#gitgarbagecollection)
+ - [GerritNetwork](#gerritnetwork)
+ - [GerritClusterSpec](#gerritclusterspec)
+ - [GerritClusterStatus](#gerritclusterstatus)
+ - [StorageConfig](#storageconfig)
+ - [GerritStorageConfig](#gerritstorageconfig)
+ - [StorageClassConfig](#storageclassconfig)
+ - [NfsWorkaroundConfig](#nfsworkaroundconfig)
+ - [SharedStorage](#sharedstorage)
+ - [PluginCacheConfig](#plugincacheconfig)
+ - [ExternalPVCConfig](#externalpvcconfig)
+ - [ContainerImageConfig](#containerimageconfig)
+ - [BusyBoxImage](#busyboximage)
+ - [GerritRepositoryConfig](#gerritrepositoryconfig)
+ - [GerritClusterIngressConfig](#gerritclusteringressconfig)
+ - [GerritIngressTlsConfig](#gerritingresstlsconfig)
+ - [GerritIngressAmbassadorConfig](#gerritingressambassadorconfig)
+ - [GlobalRefDbConfig](#globalrefdbconfig)
+ - [RefDatabase](#refdatabase)
+ - [SpannerRefDbConfig](#spannerrefdbconfig)
+ - [ZookeeperRefDbConfig](#zookeeperrefdbconfig)
+ - [GerritTemplate](#gerrittemplate)
+ - [GerritTemplateSpec](#gerrittemplatespec)
+ - [GerritProbe](#gerritprobe)
+ - [GerritServiceConfig](#gerritserviceconfig)
+ - [GerritSite](#gerritsite)
+ - [GerritModule](#gerritmodule)
+ - [GerritPlugin](#gerritplugin)
+ - [GerritMode](#gerritmode)
+ - [GerritDebugConfig](#gerritdebugconfig)
+ - [GerritSpec](#gerritspec)
+ - [GerritStatus](#gerritstatus)
+ - [IngressConfig](#ingressconfig)
+ - [ReceiverTemplate](#receivertemplate)
+ - [ReceiverTemplateSpec](#receivertemplatespec)
+ - [ReceiverSpec](#receiverspec)
+ - [ReceiverStatus](#receiverstatus)
+ - [ReceiverProbe](#receiverprobe)
+ - [ReceiverServiceConfig](#receiverserviceconfig)
+ - [GitGarbageCollectionSpec](#gitgarbagecollectionspec)
+ - [GitGarbageCollectionStatus](#gitgarbagecollectionstatus)
+ - [GitGcState](#gitgcstate)
+ - [GerritNetworkSpec](#gerritnetworkspec)
+ - [NetworkMember](#networkmember)
+ - [NetworkMemberWithSsh](#networkmemberwithssh)
+
+## General Remarks
+
+### Inheritance
+
+Some objects inherit the fields of other objects. In this case the section will
+contain an **Extends:** label to link to the parent object, but it will not repeat
+inherited fields.
+
+## GerritCluster
+
+---
+
+**Group**: gerritoperator.google.com \
+**Version**: v1alpha17 \
+**Kind**: GerritCluster
+
+---
+
+
+| Field | Type | Description |
+|---|---|---|
+| `apiVersion` | `String` | APIVersion of this resource |
+| `kind` | `String` | Kind of this resource |
+| `metadata` | [`ObjectMeta`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | Metadata of the resource |
+| `spec` | [`GerritClusterSpec`](#gerritclusterspec) | Specification for GerritCluster |
+| `status` | [`GerritClusterStatus`](#gerritclusterstatus) | Status for GerritCluster |
+
+Example:
+
+```yaml
+apiVersion: "gerritoperator.google.com/v1alpha17"
+kind: GerritCluster
+metadata:
+ name: gerrit
+spec:
+ containerImages:
+ imagePullSecrets: []
+ imagePullPolicy: Always
+ gerritImages:
+ registry: docker.io
+ org: k8sgerrit
+ tag: latest
+ busyBox:
+ registry: docker.io
+ tag: latest
+
+ storage:
+ storageClasses:
+ readWriteOnce: default
+ readWriteMany: shared-storage
+ nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idmapdConfig: |-
+ [General]
+ Verbosity = 0
+ Domain = localdomain.com
+
+ [Mapping]
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+
+ sharedStorage:
+ externalPVC:
+ enabled: false
+ claimName: ""
+ size: 1Gi
+ volumeName: ""
+ selector:
+ matchLabels:
+ volume-type: ssd
+ aws-availability-zone: us-east-1
+
+ pluginCache:
+ enabled: false
+
+ ingress:
+ enabled: true
+ host: example.com
+ annotations: {}
+ tls:
+ enabled: false
+ secret: ""
+ ambassador:
+ id: []
+ createHost: false
+
+ refdb:
+ database: NONE
+ spanner:
+ projectName: ""
+ instance: ""
+ database: ""
+ zookeeper:
+ connectString: ""
+ rootNode: ""
+
+ serverId: ""
+
+ gerrits:
+ - metadata:
+ name: gerrit
+ labels:
+ app: gerrit
+ spec:
+ serviceAccount: gerrit
+
+ tolerations:
+ - key: key1
+ operator: Equal
+ value: value1
+ effect: NoSchedule
+
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: disktype
+ operator: In
+ values:
+ - ssd
+
+ topologySpreadConstraints: []
+ - maxSkew: 1
+ topologyKey: zone
+ whenUnsatisfiable: DoNotSchedule
+ labelSelector:
+ matchLabels:
+ foo: bar
+
+ priorityClassName: ""
+
+ replicas: 1
+ updatePartition: 0
+
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ startupProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ readinessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ livenessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ gracefulStopTimeout: 30
+
+ service:
+ type: NodePort
+ httpPort: 80
+ sshPort: 29418
+
+ mode: REPLICA
+
+ debug:
+ enabled: false
+ suspend: false
+
+ site:
+ size: 1Gi
+
+ plugins:
+ # Installs a packaged plugin
+ - name: delete-project
+
+ # Downloads and installs a plugin
+ - name: javamelody
+ url: https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.6/job/plugin-javamelody-bazel-master-stable-3.6/lastSuccessfulBuild/artifact/bazel-bin/plugins/javamelody/javamelody.jar
+ sha1: 40ffcd00263171e373a24eb6a311791b2924707c
+
+ # If the `installAsLibrary` option is set to `true` the plugin's jar-file will
+ # be symlinked to the lib directory and thus installed as a library as well.
+ - name: saml
+ url: https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.6/job/plugin-saml-bazel-master-stable-3.6/lastSuccessfulBuild/artifact/bazel-bin/plugins/saml/saml.jar
+ sha1: 6dfe8292d46b179638586e6acf671206f4e0a88b
+ installAsLibrary: true
+
+ libs:
+ - name: global-refdb
+ url: https://example.com/global-refdb.jar
+ sha1: 3d533a536b0d4e0184f824478c24bc8dfe896d06
+
+ configFiles:
+ gerrit.config: |-
+ [gerrit]
+ serverId = gerrit-1
+ disableReverseDnsLookup = true
+ [index]
+ type = LUCENE
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ requestLog = true
+ gracefulStopTimeout = 1m
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [container]
+ javaOptions = -Xms200m
+ javaOptions = -Xmx4g
+
+ secretRef: gerrit-secure-config
+
+ receiver:
+ metadata:
+ name: receiver
+ labels:
+ app: receiver
+ spec:
+ tolerations:
+ - key: key1
+ operator: Equal
+ value: value2
+ effect: NoSchedule
+
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: disktype
+ operator: In
+ values:
+ - ssd
+
+ topologySpreadConstraints: []
+ - maxSkew: 1
+ topologyKey: zone
+ whenUnsatisfiable: DoNotSchedule
+ labelSelector:
+ matchLabels:
+ foo: bar
+
+ priorityClassName: ""
+
+ replicas: 2
+ maxSurge: 1
+ maxUnavailable: 1
+
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ readinessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ livenessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ service:
+ type: NodePort
+ httpPort: 80
+
+ credentialSecretRef: receiver-credentials
+```
+
+## Gerrit
+
+---
+
+**Group**: gerritoperator.google.com \
+**Version**: v1alpha17 \
+**Kind**: Gerrit
+
+---
+
+
+| Field | Type | Description |
+|---|---|---|
+| `apiVersion` | `String` | APIVersion of this resource |
+| `kind` | `String` | Kind of this resource |
+| `metadata` | [`ObjectMeta`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | Metadata of the resource |
+| `spec` | [`GerritSpec`](#gerritspec) | Specification for Gerrit |
+| `status` | [`GerritStatus`](#gerritstatus) | Status for Gerrit |
+
+Example:
+
+```yaml
+apiVersion: "gerritoperator.google.com/v1alpha17"
+kind: Gerrit
+metadata:
+ name: gerrit
+spec:
+ serviceAccount: gerrit
+
+ tolerations:
+ - key: key1
+ operator: Equal
+ value: value1
+ effect: NoSchedule
+
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: disktype
+ operator: In
+ values:
+ - ssd
+
+ topologySpreadConstraints:
+ - maxSkew: 1
+ topologyKey: zone
+ whenUnsatisfiable: DoNotSchedule
+ labelSelector:
+ matchLabels:
+ foo: bar
+
+ priorityClassName: ""
+
+ replicas: 1
+ updatePartition: 0
+
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ startupProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ readinessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ livenessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ gracefulStopTimeout: 30
+
+ service:
+ type: NodePort
+ httpPort: 80
+ sshPort: 29418
+
+ mode: PRIMARY
+
+ debug:
+ enabled: false
+ suspend: false
+
+ site:
+ size: 1Gi
+
+ plugins:
+ # Installs a plugin packaged into the gerrit.war file
+ - name: delete-project
+
+ # Downloads and installs a plugin
+ - name: javamelody
+ url: https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.6/job/plugin-javamelody-bazel-master-stable-3.6/lastSuccessfulBuild/artifact/bazel-bin/plugins/javamelody/javamelody.jar
+ sha1: 40ffcd00263171e373a24eb6a311791b2924707c
+
+ # If the `installAsLibrary` option is set to `true` the plugin jar-file will
+ # be symlinked to the lib directory and thus installed as a library as well.
+ - name: saml
+ url: https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.6/job/plugin-saml-bazel-master-stable-3.6/lastSuccessfulBuild/artifact/bazel-bin/plugins/saml/saml.jar
+ sha1: 6dfe8292d46b179638586e6acf671206f4e0a88b
+ installAsLibrary: true
+
+ libs:
+ - name: global-refdb
+ url: https://example.com/global-refdb.jar
+ sha1: 3d533a536b0d4e0184f824478c24bc8dfe896d06
+
+ configFiles:
+ gerrit.config: |-
+ [gerrit]
+ serverId = gerrit-1
+ disableReverseDnsLookup = true
+ [index]
+ type = LUCENE
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ requestLog = true
+ gracefulStopTimeout = 1m
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [container]
+ javaOptions = -Xms200m
+ javaOptions = -Xmx4g
+
+ secretRef: gerrit-secure-config
+
+ serverId: ""
+
+ containerImages:
+ imagePullSecrets: []
+ imagePullPolicy: Always
+ gerritImages:
+ registry: docker.io
+ org: k8sgerrit
+ tag: latest
+ busyBox:
+ registry: docker.io
+ tag: latest
+
+ storage:
+ storageClasses:
+ readWriteOnce: default
+ readWriteMany: shared-storage
+ nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idmapdConfig: |-
+ [General]
+ Verbosity = 0
+ Domain = localdomain.com
+
+ [Mapping]
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+
+ sharedStorage:
+ externalPVC:
+ enabled: false
+ claimName: ""
+ size: 1Gi
+ volumeName: ""
+ selector:
+ matchLabels:
+ volume-type: ssd
+ aws-availability-zone: us-east-1
+
+ pluginCache:
+ enabled: false
+
+ ingress:
+ host: example.com
+ tlsEnabled: false
+
+ refdb:
+ database: NONE
+ spanner:
+ projectName: ""
+ instance: ""
+ database: ""
+ zookeeper:
+ connectString: ""
+ rootNode: ""
+```
+
+## Receiver
+
+---
+
+**Group**: gerritoperator.google.com \
+**Version**: v1alpha6 \
+**Kind**: Receiver
+
+---
+
+
+| Field | Type | Description |
+|---|---|---|
+| `apiVersion` | `String` | APIVersion of this resource |
+| `kind` | `String` | Kind of this resource |
+| `metadata` | [`ObjectMeta`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | Metadata of the resource |
+| `spec` | [`ReceiverSpec`](#receiverspec) | Specification for Receiver |
+| `status` | [`ReceiverStatus`](#receiverstatus) | Status for Receiver |
+
+Example:
+
+```yaml
+apiVersion: "gerritoperator.google.com/v1alpha6"
+kind: Receiver
+metadata:
+ name: receiver
+spec:
+ tolerations:
+ - key: key1
+ operator: Equal
+ value: value1
+ effect: NoSchedule
+
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: disktype
+ operator: In
+ values:
+ - ssd
+
+ topologySpreadConstraints:
+ - maxSkew: 1
+ topologyKey: zone
+ whenUnsatisfiable: DoNotSchedule
+ labelSelector:
+ matchLabels:
+ foo: bar
+
+ priorityClassName: ""
+
+ replicas: 1
+ maxSurge: 1
+ maxUnavailable: 1
+
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ readinessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ livenessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ service:
+ type: NodePort
+ httpPort: 80
+
+ credentialSecretRef: apache-credentials
+
+ containerImages:
+ imagePullSecrets: []
+ imagePullPolicy: Always
+ gerritImages:
+ registry: docker.io
+ org: k8sgerrit
+ tag: latest
+ busyBox:
+ registry: docker.io
+ tag: latest
+
+ storage:
+ storageClasses:
+ readWriteOnce: default
+ readWriteMany: shared-storage
+ nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idmapdConfig: |-
+ [General]
+ Verbosity = 0
+ Domain = localdomain.com
+
+ [Mapping]
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+
+ sharedStorage:
+ externalPVC:
+ enabled: false
+ claimName: ""
+ size: 1Gi
+ volumeName: ""
+ selector:
+ matchLabels:
+ volume-type: ssd
+ aws-availability-zone: us-east-1
+
+ ingress:
+ host: example.com
+ tlsEnabled: false
+```
+
+## GitGarbageCollection
+
+---
+
+**Group**: gerritoperator.google.com \
+**Version**: v1alpha1 \
+**Kind**: GitGarbageCollection
+
+---
+
+
+| Field | Type | Description |
+|---|---|---|
+| `apiVersion` | `String` | APIVersion of this resource |
+| `kind` | `String` | Kind of this resource |
+| `metadata` | [`ObjectMeta`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | Metadata of the resource |
+| `spec` | [`GitGarbageCollectionSpec`](#gitgarbagecollectionspec) | Specification for GitGarbageCollection |
+| `status` | [`GitGarbageCollectionStatus`](#gitgarbagecollectionstatus) | Status for GitGarbageCollection |
+
+Example:
+
+```yaml
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GitGarbageCollection
+metadata:
+ name: gitgc
+spec:
+ cluster: gerrit
+ schedule: "*/5 * * * *"
+
+ projects: []
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+ tolerations:
+ - key: key1
+ operator: Equal
+ value: value1
+ effect: NoSchedule
+
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: disktype
+ operator: In
+ values:
+ - ssd
+```
+
+## GerritNetwork
+
+---
+
+**Group**: gerritoperator.google.com \
+**Version**: v1alpha2 \
+**Kind**: GerritNetwork
+
+---
+
+
+| Field | Type | Description |
+|---|---|---|
+| `apiVersion` | `String` | APIVersion of this resource |
+| `kind` | `String` | Kind of this resource |
+| `metadata` | [`ObjectMeta`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | Metadata of the resource |
+| `spec` | [`GerritNetworkSpec`](#gerritnetworkspec) | Specification for GerritNetwork |
+
+Example:
+
+```yaml
+apiVersion: "gerritoperator.google.com/v1alpha2"
+kind: GerritNetwork
+metadata:
+ name: gerrit-network
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ annotations: {}
+ tls:
+ enabled: false
+ secret: ""
+ receiver:
+ name: receiver
+ httpPort: 80
+ primaryGerrit: {}
+ # name: gerrit-primary
+ # httpPort: 80
+ # sshPort: 29418
+ gerritReplica:
+ name: gerrit
+ httpPort: 80
+ sshPort: 29418
+```
+
+## GerritClusterSpec
+
+| Field | Type | Description |
+|---|---|---|
+| `storage` | [`GerritStorageConfig`](#gerritstorageconfig) | Storage used by Gerrit instances |
+| `containerImages` | [`ContainerImageConfig`](#containerimageconfig) | Container images used inside GerritCluster |
+| `ingress` | [`GerritClusterIngressConfig`](#gerritclusteringressconfig) | Ingress traffic handling in GerritCluster |
+| `refdb` | [`GlobalRefDbConfig`](#globalrefdbconfig) | The Global RefDB used by Gerrit |
+| `serverId` | `String` | The serverId to be used for all Gerrit instances (default: `<namespace>/<name>`) |
+| `gerrits` | [`GerritTemplate`](#gerrittemplate)-Array | A list of Gerrit instances to be installed in the GerritCluster. Only a single primary Gerrit and a single Gerrit Replica are permitted. |
+| `receiver` | [`ReceiverTemplate`](#receivertemplate) | A Receiver instance to be installed in the GerritCluster. |
+
+## GerritClusterStatus
+
+| Field | Type | Description |
+|---|---|---|
+| `members` | `Map<String, List<String>>` | A map listing all Gerrit and Receiver instances managed by the GerritCluster by name |
+
+## StorageConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `storageClasses` | [`StorageClassConfig`](#storageclassconfig) | StorageClasses used in the GerritCluster |
+| `sharedStorage` | [`SharedStorage`](#sharedstorage) | Volume used for resources shared between Gerrit instances except git repositories |
+
+## GerritStorageConfig
+
+Extends [StorageConfig](#storageconfig).
+
+| Field | Type | Description |
+|---|---|---|
+| `pluginCache` | [`PluginCacheConfig`](#plugincacheconfig) | Configuration of cache for downloaded plugins |
+
+## StorageClassConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `readWriteOnce` | `String` | Name of a StorageClass allowing ReadWriteOnce access. (default: `default`) |
+| `readWriteMany` | `String` | Name of a StorageClass allowing ReadWriteMany access. (default: `shared-storage`) |
+| `nfsWorkaround` | [`NfsWorkaroundConfig`](#nfsworkaroundconfig) | NFS is not well supported by Kubernetes. These options provide a workaround to ensure correct file ownership and id mapping |
+
+## NfsWorkaroundConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `enabled` | `boolean` | If enabled, the options below may be used. (default: `false`) |
+| `chownOnStartup` | `boolean` | If enabled, the ownership of the mounted NFS volumes will be set on pod startup. Note that this is not done recursively. It is expected that all data already present in the volume was created by the user used in accessing containers. (default: `false`) |
+| `idmapdConfig` | `String` | The idmapd.config file can be used to e.g. configure the ID domain. This might be necessary for some NFS servers to ensure correct mapping of user and group IDs. (optional) |
+
+## SharedStorage
+
+| Field | Type | Description |
+|---|---|---|
+| `externalPVC` | [`ExternalPVCConfig`](#externalpvcconfig) | Configuration regarding the use of an external / manually created PVC |
+| `size` | [`Quantity`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#quantity-resource-core) | Size of the volume (mandatory) |
+| `volumeName` | `String` | Name of a specific persistent volume to claim (optional) |
+| `selector` | [`LabelSelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#labelselector-v1-meta) | Selector to select a specific persistent volume (optional) |
+
+## PluginCacheConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `enabled` | `boolean` | If enabled, downloaded plugins will be cached. (default: `false`) |
+
+## ExternalPVCConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `enabled` | `boolean` | If enabled, a provided PVC will be used instead of creating one. (default: `false`) |
+| `claimName` | `String` | Name of the PVC to be used. |
+
+## ContainerImageConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `imagePullPolicy` | `String` | Image pull policy (https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to be used in all containers. (default: `Always`) |
+| `imagePullSecrets` | [`LocalObjectReference`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#localobjectreference-v1-core)-Array | List of names representing imagePullSecrets available in the cluster. These secrets will be added to all pods. (optional) |
+| `busyBox` | [`BusyBoxImage`](#busyboximage) | The busybox container is used for some init containers |
+| `gerritImages` | [`GerritRepositoryConfig`](#gerritrepositoryconfig) | The container images in this project are tagged with the output of git describe. All container images are published for each version, even when the image itself was not updated. This ensures that all containers work well together. Here, the data on how to get those images can be configured. |
+
+## BusyBoxImage
+
+| Field | Type | Description |
+|---|---|---|
+| `registry` | `String` | The registry from which to pull the "busybox" image. (default: `docker.io`) |
+| `tag` | `String` | The tag/version of the "busybox" image. (default: `latest`) |
+
+## GerritRepositoryConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `registry` | `String` | The registry from which to pull the images. (default: `docker.io`) |
+| `org` | `String` | The organization in the registry containing the images. (default: `k8sgerrit`) |
+| `tag` | `String` | The tag/version of the images. (default: `latest`) |
+
+## GerritClusterIngressConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `enabled` | `boolean` | Whether to configure an ingress provider to manage the ingress traffic in the GerritCluster (default: `false`) |
+| `host` | `string` | Hostname to be used by the ingress. For each Gerrit deployment a new subdomain using the name of the respective Gerrit CustomResource will be used. |
+| `annotations` | `Map<String, String>` | Annotations to be set for the ingress. This allows to configure the ingress further by e.g. setting the ingress class. This will be only used for type INGRESS and ignored otherwise. (optional) |
+| `tls` | [`GerritIngressTlsConfig`](#gerritingresstlsconfig) | Configuration of TLS to be used in the ingress |
+| `ambassador` | [`GerritIngressAmbassadorConfig`](#gerritingressambassadorconfig) | Ambassador configuration. Only relevant when the INGRESS environment variable is set to "ambassador" in the operator |
+
+## GerritIngressTlsConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `enabled` | `boolean` | Whether to use TLS (default: `false`) |
+| `secret` | `String` | Name of the secret containing the TLS key pair. The certificate should be a wildcard certificate allowing for all subdomains under the given host. |
+
+## GerritIngressAmbassadorConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `id` | `List<String>` | The operator uses the ids specified in `ambassadorId` to set the [ambassador_id](https://www.getambassador.io/docs/edge-stack/1.14/topics/running/running#ambassador_id) spec field in the Ambassador CustomResources it creates (`Mapping`, `TLSContext`). (optional) |
+| `createHost`| `boolean` | Specify whether you want the operator to create a `Host` resource. This will be required if you don't have a wildcard host set up in your cluster. Default is `false`. (optional) |
+
+## GlobalRefDbConfig
+
+Note, that the operator will not deploy or operate the database used for the
+global refdb. It will only configure Gerrit to use it.
+
+| Field | Type | Description |
+|---|---|---|
+| `database` | [`RefDatabase`](#refdatabase) | Which database to use for the global refdb. Choices: `NONE`, `SPANNER`, `ZOOKEEPER`. (default: `NONE`) |
+| `spanner` | [`SpannerRefDbConfig`](#spannerrefdbconfig) | Configuration of spanner. Only used if spanner was configured to be used for the global refdb. |
+| `zookeeper` | [`ZookeeperRefDbConfig`](#zookeeperrefdbconfig) | Configuration of zookeeper. Only used, if zookeeper was configured to be used for the global refdb. |
+
+## RefDatabase
+
+| Value | Description|
+|---|---|
+| `NONE` | No global refdb will be used. Not allowed, if a primary Gerrit with 2 or more instances will be installed. |
+| `SPANNER` | Spanner will be used as a global refdb |
+| `ZOOKEEPER` | Zookeeper will be used as a global refdb |
+
+## SpannerRefDbConfig
+
+Note that the spanner ref-db plugin requires google credentials to be mounted to /var/gerrit/etc/gcp-credentials.json. Instructions for generating those credentials can be found [here](https://developers.google.com/workspace/guides/create-credentials) and may be provided in the optional secretRef in [`GerritTemplateSpec`](#gerrittemplatespec).
+
+| Field | Type | Description |
+|---|---|---|
+| `projectName` | `String` | Spanner project name to be used |
+| `instance` | `String` | Spanner instance name to be used |
+| `database` | `String` | Spanner database name to be used |
+
+## ZookeeperRefDbConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `connectString` | `String` | Hostname and port of the zookeeper instance to be used, e.g. `zookeeper.example.com:2181` |
+| `rootNode` | `String` | Root node that will be used to store the global refdb data. Will be set automatically, if `GerritCluster` is being used. |
+
+## GerritTemplate
+
+| Field | Type | Description |
+|---|---|---|
+| `metadata` | [`ObjectMeta`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | Metadata of the resource. A name is mandatory. Labels can optionally be defined. Other fields like the namespace are ignored. |
+| `spec` | [`GerritTemplateSpec`](#gerrittemplatespec) | Specification for GerritTemplate |
+
+## GerritTemplateSpec
+
+| Field | Type | Description |
+|---|---|---|
+| `serviceAccount` | `String` | ServiceAccount to be used by Gerrit. Required for service discovery when using the high-availability plugin |
+| `tolerations` | [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core)-Array | Pod tolerations (optional) |
+| `affinity` | [`Affinity`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core) | Pod affinity (optional) |
+| `topologySpreadConstraints` | [`TopologySpreadConstraint`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core)-Array | Pod topology spread constraints (optional) |
+| `priorityClassName` | `String` | [PriorityClass](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) to be used with the pod (optional) |
+| `replicas` | `int` | Number of pods running Gerrit in the StatefulSet (default: 1) |
+| `updatePartition` | `int` | Ordinal at which to start updating pods. Pods with a lower ordinal will not be updated. (default: 0) |
+| `resources` | [`ResourceRequirements`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core) | Resource requirements for the Gerrit container |
+| `startupProbe` | [`GerritProbe`](#gerritprobe) | [Startup probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). The action will be set by the operator. All other probe parameters can be set. |
+| `readinessProbe` | [`GerritProbe`](#gerritprobe) | [Readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). The action will be set by the operator. All other probe parameters can be set. |
+| `livenessProbe` | [`GerritProbe`](#gerritprobe) | [Liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). The action will be set by the operator. All other probe parameters can be set. |
+| `gracefulStopTimeout` | `long` | Seconds the pod is allowed to shutdown until it is forcefully killed (default: 30) |
+| `service` | [`GerritServiceConfig`](#gerritserviceconfig) | Configuration for the service used to manage network access to the StatefulSet |
+| `site` | [`GerritSite`](#gerritsite) | Configuration concerning the Gerrit site directory |
+| `plugins` | [`GerritPlugin`](#gerritplugin)-Array | List of Gerrit plugins to install. These plugins can either be packaged in the Gerrit war-file or they will be downloaded. (optional) |
+| `libs` | [`GerritModule`](#gerritmodule)-Array | List of Gerrit library modules to install. These lib modules will be downloaded. (optional) |
+| `configFiles` | `Map<String, String>` | Configuration files for Gerrit that will be mounted into the Gerrit site's etc-directory (gerrit.config is mandatory) |
+| `secretRef` | `String` | Name of secret containing configuration files, e.g. secure.config, that will be mounted into the Gerrit site's etc-directory (optional) |
+| `mode` | [`GerritMode`](#gerritmode) | In which mode Gerrit should be run. (default: PRIMARY) |
+| `debug` | [`GerritDebugConfig`](#gerritdebugconfig) | Enable the debug-mode for Gerrit |
+
+## GerritProbe
+
+**Extends:** [`Probe`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#probe-v1-core)
+
+Unlike in the parent object, the fields `exec`, `grpc`, `httpGet` and `tcpSocket`
+cannot be set manually. All other options can still be configured.
+
+## GerritServiceConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `type` | `String` | Service type (default: `NodePort`) |
+| `httpPort` | `int` | Port used for HTTP requests (default: `80`) |
+| `sshPort` | `Integer` | Port used for SSH requests (optional; if unset, SSH access is disabled). If Istio is used, the Gateway will be automatically configured to accept SSH requests. If an Ingress controller is used, SSH requests will only be served by the Service itself! |
+
+## GerritSite
+
+| Field | Type | Description |
+|---|---|---|
+| `size` | [`Quantity`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#quantity-resource-core) | Size of the volume used to persist not otherwise persisted site components (e.g. git repositories are persisted in a dedicated volume) (mandatory) |
+
+## GerritModule
+
+| Field | Type | Description |
+|---|---|---|
+| `name` | `String` | Name of the module/plugin |
+| `url` | `String` | URL of the module/plugin, if it should be downloaded. If the URL is not set, the plugin is expected to be packaged in the war-file (not possible for lib-modules). (optional) |
+| `sha1` | `String` | SHA1-checksum of the module/plugin JAR-file. (mandatory, if `url` is set) |
+
+## GerritPlugin
+
+**Extends:** [`GerritModule`](#gerritmodule)
+
+| Field | Type | Description |
+|---|---|---|
+| `installAsLibrary` | `boolean` | Some plugins also need to be installed as a library. If set to `true` the plugin JAR will be symlinked to the `lib`-directory in the Gerrit site. (default: `false`) |
+
+## GerritMode
+
+| Value | Description|
+|---|---|
+| `PRIMARY` | A primary Gerrit |
+| `REPLICA` | A Gerrit Replica, which only serves git fetch/clone requests |
+
+## GerritDebugConfig
+
+These options allow debugging Gerrit. Enabling them will turn on debugging in all
+pods and expose port 8000 in the container. Port-forwarding is required to connect the
+debugger.
+Note, that all pods will be restarted to enable the debugger. Also, if `suspend`
+is enabled, ensure that the lifecycle probes are configured accordingly to prevent
+pod restarts before Gerrit is ready.
+
+| Field | Type | Description |
+|---|---|---|
+| `enabled` | `boolean` | Whether to enable debugging. (default: `false`) |
+| `suspend` | `boolean` | Whether to suspend Gerrit on startup. (default: `false`) |
+
+## GerritSpec
+
+**Extends:** [`GerritTemplateSpec`](#gerrittemplatespec)
+
+| Field | Type | Description |
+|---|---|---|
+| `storage` | [`GerritStorageConfig`](#gerritstorageconfig) | Storage used by Gerrit instances |
+| `containerImages` | [`ContainerImageConfig`](#containerimageconfig) | Container images used inside GerritCluster |
+| `ingress` | [`IngressConfig`](#ingressconfig) | Ingress configuration for Gerrit |
+| `refdb` | [`GlobalRefDbConfig`](#globalrefdbconfig) | The Global RefDB used by Gerrit |
+| `serverId` | `String` | The serverId to be used for all Gerrit instances |
+
+## GerritStatus
+
+| Field | Type | Description |
+|---|---|---|
+| `ready` | `boolean` | Whether the Gerrit instance is ready |
+| `appliedConfigMapVersions` | `Map<String, String>` | Versions of each ConfigMap currently mounted into Gerrit pods |
+| `appliedSecretVersions` | `Map<String, String>` | Versions of each secret currently mounted into Gerrit pods |
+
+## IngressConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `host` | `string` | Hostname that is being used by the ingress provider for this Gerrit instance. |
+| `tlsEnabled` | `boolean` | Whether the ingress provider enables TLS. (default: `false`) |
+
+## ReceiverTemplate
+
+| Field | Type | Description |
+|---|---|---|
+| `metadata` | [`ObjectMeta`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | Metadata of the resource. A name is mandatory. Labels can optionally be defined. Other fields like the namespace are ignored. |
+| `spec` | [`ReceiverTemplateSpec`](#receivertemplatespec) | Specification for ReceiverTemplate |
+
+## ReceiverTemplateSpec
+
+| Field | Type | Description |
+|---|---|---|
+| `tolerations` | [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core)-Array | Pod tolerations (optional) |
+| `affinity` | [`Affinity`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core) | Pod affinity (optional) |
+| `topologySpreadConstraints` | [`TopologySpreadConstraint`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core)-Array | Pod topology spread constraints (optional) |
+| `priorityClassName` | `String` | [PriorityClass](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) to be used with the pod (optional) |
+| `replicas` | `int` | Number of pods running the receiver in the Deployment (default: 1) |
+| `maxSurge` | `IntOrString` | Absolute number or percentage of pods that are allowed to be created in addition during rolling updates. (default: `1`) |
+| `maxUnavailable` | `IntOrString` | Absolute number or percentage of pods that are allowed to be unavailable during rolling updates. (default: `1`) |
+| `resources` | [`ResourceRequirements`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core) | Resource requirements for the Receiver container |
+| `readinessProbe` | [`ReceiverProbe`](#receiverprobe) | [Readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). The action will be set by the operator. All other probe parameters can be set. |
+| `livenessProbe` | [`ReceiverProbe`](#receiverprobe) | [Liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). The action will be set by the operator. All other probe parameters can be set. |
+| `service` | [`ReceiverServiceConfig`](#receiverserviceconfig) | Configuration for the service used to manage network access to the Deployment |
+| `credentialSecretRef` | `String` | Name of the secret containing the .htpasswd file used to configure basic authentication within the Apache server (mandatory) |
+
+## ReceiverSpec
+
+**Extends:** [`ReceiverTemplateSpec`](#receivertemplatespec)
+
+| Field | Type | Description |
+|---|---|---|
+| `storage` | [`StorageConfig`](#storageconfig) | Storage used by Gerrit/Receiver instances |
+| `containerImages` | [`ContainerImageConfig`](#containerimageconfig) | Container images used inside GerritCluster |
+| `ingress` | [`IngressConfig`](#ingressconfig) | Ingress configuration for Gerrit |
+
+## ReceiverStatus
+
+| Field | Type | Description |
+|---|---|---|
+| `ready` | `boolean` | Whether the Receiver instance is ready |
+| `appliedCredentialSecretVersion` | `String` | Version of credential secret currently mounted into Receiver pods |
+
+## ReceiverProbe
+
+**Extends:** [`Probe`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#probe-v1-core)
+
+Unlike in the parent object, the fields `exec`, `grpc`, `httpGet` and `tcpSocket`
+cannot be set manually. All other options can still be configured.
+
+## ReceiverServiceConfig
+
+| Field | Type | Description |
+|---|---|---|
+| `type` | `String` | Service type (default: `NodePort`) |
+| `httpPort` | `int` | Port used for HTTP requests (default: `80`) |
+
+## GitGarbageCollectionSpec
+
+| Field | Type | Description |
+|---|---|---|
+| `cluster` | `string` | Name of the Gerrit cluster this GitGarbageCollection is a part of. (mandatory) |
+| `tolerations` | [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core)-Array | Pod tolerations (optional) |
+| `affinity` | [`Affinity`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core) | Pod affinity (optional) |
+| `schedule` | `string` | Cron schedule defining when to run git gc (mandatory) |
+| `projects` | `Set<String>` | List of projects to gc. If omitted, all projects not handled by other Git GC jobs will be gc'ed. Only one job gc'ing all projects can exist. (default: `[]`) |
+| `resources` | [`ResourceRequirements`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core) | Resource requirements for the GitGarbageCollection container |
+
+## GitGarbageCollectionStatus
+
+| Field | Type | Description |
+|---|---|---|
+| `replicateAll` | `boolean` | Whether this GitGarbageCollection handles all projects |
+| `excludedProjects` | `Set<String>` | List of projects that were excluded from this GitGarbageCollection, since they are handled by other Jobs |
+| `state` | [`GitGcState`](#gitgcstate) | State of the GitGarbageCollection |
+
+## GitGcState
+
+| Value | Description|
+|---|---|
+| `ACTIVE` | GitGarbageCollection is scheduled |
+| `INACTIVE` | GitGarbageCollection is not scheduled |
+| `CONFLICT` | GitGarbageCollection conflicts with another GitGarbageCollection |
+| `ERROR` | Controller failed to schedule GitGarbageCollection |
+
+## GerritNetworkSpec
+
+| Field | Type | Description |
+|---|---|---|
+| `ingress` | [`GerritClusterIngressConfig`](#gerritclusteringressconfig) | Ingress traffic handling in GerritCluster |
+| `receiver` | [`NetworkMember`](#networkmember) | Receiver in the network. |
+| `primaryGerrit` | [`NetworkMemberWithSsh`](#networkmemberwithssh) | Primary Gerrit in the network. |
+| `gerritReplica` | [`NetworkMemberWithSsh`](#networkmemberwithssh) | Gerrit Replica in the network. |
+
+## NetworkMember
+
+| Field | Type | Description |
+|------------|----------|----------------------------|
+| `name` | `String` | Name of the network member |
+| `httpPort` | `int` | Port used for HTTP(S) |
+
+## NetworkMemberWithSsh
+
+**Extends:** [`NetworkMember`](#networkmember)
+
+| Field | Type | Description |
+|-----------|-------|-------------------|
+| `sshPort` | `int` | Port used for SSH |
diff --git a/charts/k8s-gerrit/Documentation/operator.md b/charts/k8s-gerrit/Documentation/operator.md
new file mode 100644
index 0000000..919e217
--- /dev/null
+++ b/charts/k8s-gerrit/Documentation/operator.md
@@ -0,0 +1,440 @@
+# Gerrit Operator
+
+1. [Gerrit Operator](#gerrit-operator)
+ 1. [Build](#build)
+ 2. [Versioning](#versioning)
+ 3. [Publish](#publish)
+ 4. [Tests](#tests)
+ 5. [Prerequisites](#prerequisites)
+ 1. [Shared Storage (ReadWriteMany)](#shared-storage-readwritemany)
+ 2. [Ingress provider](#ingress-provider)
+ 6. [Deploy](#deploy)
+ 1. [Using helm charts](#using-helm-charts)
+ 1. [gerrit-operator-crds](#gerrit-operator-crds)
+ 2. [gerrit-operator](#gerrit-operator-1)
+ 2. [Without the helm charts](#without-the-helm-charts)
+ 7. [CustomResources](#customresources)
+ 1. [GerritCluster](#gerritcluster)
+ 2. [Gerrit](#gerrit)
+ 3. [GitGarbageCollection](#gitgarbagecollection)
+ 4. [Receiver](#receiver)
+ 5. [GerritNetwork](#gerritnetwork)
+ 8. [Configuration of Gerrit](#configuration-of-gerrit)
+
+## Build
+
+For this step, you need Java 11 and Maven installed.
+
+To build all components of the operator run:
+
+```sh
+cd operator
+mvn clean install
+```
+
+This step compiles the Java source code into `.class` bytecode files in a newly
+generated `operator/target` folder. A `gerrit-operator` image is also created
+locally. Moreover, the CRD helm chart is updated with the latest CRDs as part of
+this build step.
+
+The jar-version and container image tag can be set using the `revision` property:
+
+```sh
+mvn clean install -Drevision=$(git describe --always --dirty)
+```
+
+## Versioning
+
+The Gerrit Operator is still in an early state of development. The operator is
+thus at the moment not semantically versioned. The CustomResources are as of now
+independently versioned, i.e. the `GerritCluster` resource can have a different
+version than the `GitGarbageCollection` resource, although they are in the same
+group. At the moment, only the current version will be supported by the operator,
+i.e. there won't be a migration path. As soon as the API reaches some stability,
+this will change.
+
+## Publish
+
+Currently, there does not exist a container image for the operator in the
+`docker.io/k8sgerrit` registry. You must build your own image in order to run
+the operator in your cluster. To publish the container image of the Gerrit
+Operator:
+
+1. Update the `docker.registry` and `docker.org` tags in the `operator/pom.xml`
+file to point to your own Docker registry and org that you have permissions to
+push to.
+
+```xml
+<docker.registry>my-registry</docker.registry>
+<docker.org>my-org</docker.org>
+```
+
+2. run the following commands:
+
+```sh
+cd operator
+mvn clean install -P publish
+```
+
+This will build the operator source code, create an image out of the
+built artifacts, and publish this image to the registry specified in the
+`pom.xml` file. The built image is multi-platform - it will run on both `amd64`
+and `arm64` architectures. It is okay to run this build command from an ARM
+Mac.
+
+## Tests
+
+Executing the E2E tests has a few infrastructure requirements that have to be
+provided:
+
+- An (unused) Kubernetes cluster
+- The 'default' StorageClass that supports ReadWriteOnce access. It has to be
+ possible to provision volumes using this StorageClass.
+- A StorageClass that supports ReadWriteMany access. It has to be possible to
+ provision volumes using this StorageClass. Such a StorageClass could be provided
+ by the [NFS-subdir-provisioner chart](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner).
+- An [Nginx Ingress Controller](https://github.com/kubernetes/ingress-nginx)
+- An installation of [OpenLDAP](../supplements/test-cluster/ldap/openldap.yaml)
+ with at least one user.
+- Istio installed with the [profile](../istio/gerrit.profile.yaml) provided by
+ this project
+- A secret containing valid certificates for the given hostnames. For istio this
+ secret has to be named `tls-secret` and be present in the `istio-system` namespace.
+ For the Ingress controller, the secret has to be either set as the default
+ secret to be used or somehow automatically be provided in the namespaces created
+ by the tests and named `tls-secret`, e.g. by using Gardener to manage DNS and
+ certificates.
+
+A sample setup for components required in the cluster is provided under
+`$REPO_ROOT/supplements/test-cluster`. Some configuration has to be done manually
+(marked by `#TODO`) and the `deploy.sh`-script can be used to install/update all
+components.
+
+In addition, some properties have to be set to configure the tests:
+
+- `rwmStorageClass`: Name of the StorageClass providing RWM-access (default: nfs-client)
+- `registry`: Registry to pull container images from
+- `RegistryOrg`: Organization of the container images
+- `tag`: Container tag
+- `registryUser`: User for the container registry
+- `registryPwd`: Password for the container registry
+- `ingressDomain`: Domain to be used for the ingress
+- `istioDomain`: Domain to be used for istio
+- `ldapAdminPwd`: Admin password for LDAP server
+- `gerritUser`: Username of a user in LDAP
+- `gerritPwd`: The password of `gerritUser`
+
+The properties should be set in the `test.properties` file. Alternatively, a
+path of a properties file can be configured by using the
+`-Dproperties=<path to properties file>`-option.
+
+To run all E2E tests, use:
+
+```sh
+cd operator
+mvn clean install -P integration-test -Dproperties=<path to properties file>
+```
+
+Note, that running the E2E tests will also involve pushing the container image
+to the repository configured in the properties file.
+
+## Prerequisites
+
+Deploying Gerrit using the operator requires some additional prerequisites to be
+fulfilled:
+
+### Shared Storage (ReadWriteMany)
+
+Gerrit instances share the repositories and other data using shared volumes. Thus,
+a StorageClass and a suitable provisioner have to be available in the cluster.
+An example for such a provisioner would be the
+[NFS-subdir-external-provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner).
+
+### Ingress provider
+
+The Gerrit Operator will also set up network routing rules and an ingress point
+for the Gerrit instances it manages. The network routing rules ensure that requests
+will be routed to the intended GerritCluster component, e.g. in case a primary
+Gerrit and a Gerrit Replica exist in the cluster, git fetch/clone requests will
+be sent to the Gerrit Replica and all other requests to the primary Gerrit.
+
+You may specify the ingress provider by setting the `INGRESS` environment
+variable in the operator Deployment manifest. That is, the choice of an ingress
+provider is an operator-level setting. However, you may specify some ingress
+configuration options (host, tls, etc) at the `GerritCluster` level, via
+[GerritClusterIngressConfig](operator-api-reference.md#gerritclusteringressconfig).
+
+The Gerrit Operator currently supports the following Ingress providers:
+
+- **NONE**
+
+ The operator will install no Ingress components. Services will still be available.
+ No prerequisites are required for this case.
+
+ If `spec.ingress.enabled` is set to `true` in GerritCluster, the operator will
+ still configure network related options like `http.listenUrl` in Gerrit based on
+ the other options in `spec.ingress`.
+
+- **INGRESS**
+
+ The operator will install an Ingress. Currently only the
+ [Nginx-Ingress-Controller](https://docs.nginx.com/nginx-ingress-controller/) is
+ supported, which will have to be installed in the cluster and has to be configured
+ to [allow snippet configurations](https://docs.nginx.com/nginx-ingress-controller/configuration/ingress-resources/advanced-configuration-with-snippets/).
+ An example of a working deployment can be found [here](../supplements/test-cluster/ingress/).
+
+ SSH support is not fully managed by the operator, since it has to be enabled and
+ [configured in the nginx ingress controller itself](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/).
+
+- **ISTIO**
+
+ The operator supports the use of [Istio](https://istio.io/) as a service mesh.
+ An example on how to set up Istio can be found [here](../istio/gerrit.profile.yaml).
+
+- **AMBASSADOR**
+
+ The operator also supports [Ambassador](https://www.getambassador.io/) for
+ setting up ingress to the Gerrits deployed by the operator. If you use
+ Ambassador's "Edge Stack" or "Emissary Ingress" to provide ingress to your k8s
+ services, you should set INGRESS=AMBASSADOR. Currently, SSH is not directly
+ supported when using INGRESS=AMBASSADOR.
+
+
+## Deploy
+You will need to have admin privileges for your k8s cluster in order to be able
+to deploy the following resources.
+
+You may choose to deploy the operator resources using helm, or directly via
+`kubectl apply`.
+
+### Using helm charts
+Make sure you have [helm](https://helm.sh/) installed in your environment.
+
+There are two relevant helm charts.
+
+#### gerrit-operator-crds
+
+This chart installs the CRDs (k8s API extensions) to your k8s cluster. No chart
+values need to be modified. The build initiated by the `mvn install` command
+from the [Publish](#publish) section includes a step that updates the CRDs in
+this helm chart to reflect any changes made to them in the operator source code.
+The CRDs installed are: GerritCluster, Gerrit, GitGarbageCollection, Receiver.
+
+You do not need to manually `helm install` this chart; this chart is installed
+as a dependency of the second `gerrit-operator` helm chart as described in the
+next subheading.
+
+#### gerrit-operator
+
+This chart installs the `gerrit-operator-crds` chart as a dependency, and the
+following k8s resources:
+- Deployment
+- ServiceAccount
+- ClusterRole
+- ClusterRoleBinding
+
+The operator itself creates a Service resource and a
+ValidationWebhookConfigurations resource behind the scenes.
+
+You will need to modify the values in `helm-charts/gerrit-operator/values.yaml`
+to point the chart to the registry/org that is hosting the Docker container
+image for the operator (from the [Publish](#publish) step earlier). Now,
+
+run:
+```sh
+# Create a namespace for the gerrit-operator
+kubectl create ns gerrit-operator
+
+# Build the gerrit-operator-crds chart and store it in the charts/ subdirectory
+helm dependency build helm-charts/gerrit-operator/
+
+# Install the gerrit-operator-crds chart and the gerrit-operator chart
+helm -n gerrit-operator install gerrit-operator helm-charts/gerrit-operator/
+```
+
+The chart itself, and all the bundled namespaced resources, are installed in the
+`gerrit-operator` namespace, as per the `-n` option in the helm command.
+
+### Without the helm charts
+
+First all CustomResourceDefinitions have to be deployed:
+
+```sh
+kubectl apply -f operator/target/classes/META-INF/fabric8/*-v1.yml
+```
+
+Note that these do not include the -v1beta1.yaml files, as those are for old
+Kubernetes versions.
+
+The operator requires a Java Keystore with a keypair inside to allow TLS
+verification for Kubernetes Admission Webhooks. To create a keystore and
+encode it with base64, run:
+
+```sh
+keytool \
+ -genkeypair \
+ -alias operator \
+ -keystore keystore \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 3650
+cat keystore | base64 -b 0
+```
+
+Add the result to the Secret in `k8s/operator.yaml` (see comments in the file)
+and also add the base64-encoded password for the keystore to the secret.
+
+Then the operator and associated RBAC rules can be deployed:
+
+```sh
+kubectl apply -f operator/k8s/rbac.yaml
+kubectl apply -f operator/k8s/operator.yaml
+```
+
+`k8s/operator.yaml` contains a basic deployment of the operator. Resources,
+docker image name etc. might have to be adapted. For example, the ingress
+provider has to be configured by setting the `INGRESS` environment variable
+in `operator/k8s/operator.yaml` to either `NONE`, `INGRESS`, `ISTIO`, or
+`AMBASSADOR`.
+
+## CustomResources
+
+The operator manages several CustomResources that are described in more detail
+below.
+
+The API reference for all CustomResources can be found [here](operator-api-reference.md).
+
+### GerritCluster
+
+The GerritCluster CustomResource installs one or multiple Gerrit instances. The
+operator takes over managing the state of all Gerrit instances within the cluster
+and ensures that the state stays in sync. To this end it manages additional
+resources that are shared between Gerrit instances or are required to synchronize
+the state between Gerrit instances. These additional resources include:
+
+- storage
+- network / service mesh
+
+Installing Gerrit with the GerritCluster resource is highly recommended over using
+the [Gerrit](#gerrit) CustomResource directly, even if only a single deployment is
+installed, since this reduces the requirements that have to be managed manually.
+The same holds true for the [Receiver](#receiver) CustomResource, which without
+a Gerrit instance using the same site provides little value.
+
+For now, only a single Gerrit CustomResource using each [mode](./operator-api-reference.md#gerritmode)
+can be deployed in a GerritCluster, e.g. one primary Gerrit and one Gerrit Replica.
+The reason for that is that there is currently no sharding implemented and thus
+multiple deployments don't bring any more value than just scaling the existing
+deployment. Instead of a primary Gerrit, a Receiver can also be installed.
+
+### Gerrit
+
+The Gerrit CustomResource deploys a Gerrit, which can run in multiple modes.
+
+The Gerrit-CustomResource is mainly meant to be used by the GerritCluster-reconciler
+to install Gerrit-instances managed by a GerritCluster. Gerrit-CustomResources
+can however also be applied separately. Note that the Gerrit operator will then
+not create any storage resources or set up any network resources in addition to
+the service.
+
+### GitGarbageCollection
+
+The GitGarbageCollection-CustomResource is used by the operator to set up CronJobs
+that regularly run Git garbage collection on the git repositories that are served
+by a GerritCluster.
+
+A GitGarbageCollection can either handle all repositories, if no specific repository
+is configured or a selected set of repositories. Multiple GitGarbageCollections
+can exist as part of the same GerritCluster, but no two GitGarbageCollections
+can work on the same project. This is prevented in three ways:
+
+- ValidationWebhooks will prohibit the creation of a second GitGarbageCollection
+ that does not specify projects, i.e. that would work on all projects.
+- Projects for which a GitGarbageCollection that specifically selects them exists
+ will be excluded from the GitGarbageCollection that works on all projects, if
+ it exists.
+- ValidationWebhooks will prohibit the creation of a GitGarbageCollection that
+ specifies a project that was already specified by another GitGarbageCollection.
+
+### Receiver
+
+**NOTE:** A Receiver should never be installed for a GerritCluster that is already
+managing a primary Gerrit to avoid conflicts when writing into repositories.
+
+The Receiver-CustomResource installs a Deployment running Apache with a git-http-
+backend that is meant to receive pushes performed by Gerrit's replication plugin.
+It can only be installed into a GerritCluster that does not include a primary
+Gerrit, but only Gerrit Replicas.
+
+The Receiver-CustomResource is mainly meant to be used by the GerritCluster-reconciler
+to install a Receiver-instance managed by a GerritCluster. Receiver-CustomResources
+can however also be applied separately. Note that the Gerrit operator will then
+not create any storage resources or set up any network resources in addition to
+the service.
+
+### GerritNetwork
+
+The GerritNetwork CustomResource deploys network components depending on the
+configured ingress provider to enable ingress traffic to GerritCluster components.
+
+The GerritNetwork CustomResource is not meant to be installed manually, but will
+be created by the Gerrit Operator based on the GerritCluster CustomResource.
+
+## Configuration of Gerrit
+
+The operator takes care of all configuration in Gerrit that depends on the
+infrastructure, i.e. Kubernetes and the GerritCluster. This avoids duplicated
+configuration and misconfiguration.
+
+This means that some options in the gerrit.config are not allowed to be changed.
+If these values are set and are not matching the expected value, a ValidationWebhook
+will reject the resource creation/update. Thus, it is best to not set these values
+at all. To see which values the operator assigned check the ConfigMap created by
+the operator for the respective Gerrit.
+
+These options are:
+
+- `cache.directory`
+
+ This should stay in the volume mounted to contain the Gerrit site and will
+ thus be set to `cache`.
+
+- `container.javaHome`
+
+ This has to be set to `/usr/lib/jvm/java-11-openjdk-amd64`, since this is
+ the path of the Java installation in the container.
+
+- `container.javaOptions = -Djavax.net.ssl.trustStore`
+
+ The keystore will be mounted to `/var/gerrit/etc/keystore`.
+
+- `container.replica`
+
+ This has to be set in the Gerrit-CustomResource under `spec.isReplica`.
+
+- `container.user`
+
+ The technical user in the Gerrit container is called `gerrit`.
+
+- `gerrit.basePath`
+
+ The git repositories are mounted to `/var/gerrit/git` in the container.
+
+- `gerrit.canonicalWebUrl`
+
+ The canonical web URL has to be set to the hostname used by the Ingress/Istio.
+
+- `httpd.listenURL`
+
+ This has to be set to `proxy-http://*:8080/` or `proxy-https://*:8080/`,
+ depending on whether TLS is enabled in the Ingress or not, otherwise the Jetty
+ servlet will run into an endless redirect loop.
+
+- `sshd.advertisedAddress`
+
+ This is only enforced, if Istio is enabled. It can be configured otherwise.
+
+- `sshd.listenAddress`
+
+ Since the container port for SSH is fixed, this will be set automatically.
+ If no SSH port is configured in the service, the SSHD is disabled.
diff --git a/charts/k8s-gerrit/Documentation/roadmap.md b/charts/k8s-gerrit/Documentation/roadmap.md
new file mode 100644
index 0000000..9af175a
--- /dev/null
+++ b/charts/k8s-gerrit/Documentation/roadmap.md
@@ -0,0 +1,207 @@
+# Roadmap
+
+## General
+
+### Planned features
+
+- **Automated verification process**: Run tests automatically to verify changes. \
+ \
+ Most tests in the project require a Kubernetes cluster and some additional
+ prerequisites, e.g. Istio. Currently, the Gerrit open-source community does not
+ have these resources. At SAP, we plan to run verification in our internal systems,
+ which won't be publicly viewable, but could already vote. Builds would only
+ be triggered, if a maintainer votes `+1` on the `Build-Approved`-label. \
+ \
+ Builds can be moved to a public CI at a later point in time.
+
+- **Automated publishing of container images**: Publishing container images will
+ happen automatically on ref-updated using a CI.
+
+- **Support for multiple Gerrit versions**: All currently supported Gerrit versions
+ will also be supported in k8s-gerrit. \
+ \
+ Currently, container images used by this project are only published for a single
+ Gerrit version, which is updated on an irregular schedule. Introducing stable
+ branches for each Gerrit version will allow maintaining container images for
+ multiple Gerrit versions. Gerrit binaries will be updated with each official
+ release and more frequently on `master`. This will be (at least partially)
+ automated.
+
+- **Integration test suite**: A test suite that can be used to test a GerritCluster. \
+ \
+ A GerritCluster running in a Kubernetes cluster consists of multiple components.
+ Having a suite of automated tests would greatly help to verify deployments in
+ development landscapes before going productive.
+
+## Gerrit Operator
+
+### Version 1.0
+
+#### Implemented features
+
+- **High-availability**: Primary Gerrit StatefulSets will have limited support for
+ horizontal scaling. \
+ \
+ Scaling has been enabled using the [high-availability plugin](https://gerrit.googlesource.com/plugins/high-availability/).
+ Primary Gerrits will run in Active/Active configuration. Currently, two primary
+ Gerrit instances, i.e. 2 pods in a StatefulSet, are supported.
+
+- **Global RefDB support**: Global RefDB is required for Active/Active configurations
+ of multiple primary Gerrits. \
+ \
+ The [Global RefDB](https://gerrit.googlesource.com/modules/global-refdb) support
+ is required for high-availability as described in the previous point. The
+ Gerrit Operator automatically sets up Gerrit to use a Global RefDB
+ implementation. The following implementations are supported:
+ - [spanner-refdb](https://gerrit.googlesource.com/plugins/spanner-refdb)
+ - [zookeeper-refdb](https://gerrit.googlesource.com/plugins/zookeeper-refdb)
+
+ \
+ The Gerrit Operator does not set up the database used for the Global RefDB. It
+ does however manage plugin/module installation and configuration in Gerrit.
+
+- **Full support for Nginx**: The integration of Ingresses managed by the Nginx
+ ingress controller now supports automated routing. \
+ \
+ Instead of requiring users to use different subdomains for the different Gerrit
+ deployments in the GerritCluster, requests are now automatically routed to the
+ respective deployments. SSH has still to be set up manually, since this requires
+ setting up the routing in the Nginx ingress controller itself.
+
+#### Planned features
+
+- **Versioning of CRDs**: Provide migration paths between API changes in CRDs. \
+ \
+ At the moment updates to the CRD are done without providing a migration path.
+ This means a complete reinstallation of CRDs, Operator, CRs and dependent resources
+ is required. This is not acceptable in a production environment. Thus,
+ the operator will always support the last two versions of each CRD, if applicable,
+ and provide a migration path between those versions.
+
+- **Log collection**: Support addition of sidecar running a log collection agent
+ to send logs of all components to some logging stack. \
+ \
+ Planned supported log collectors:
+ - [OpenTelemetry agent](https://opentelemetry.io/docs/collector/deployment/agent/)
+ - Option to add a custom sidecar
+
+- **Support for additional Ingress controllers**: Add support for setting up routing
+ configurations for additional Ingress controllers \
+ \
+ Additional ingress controllers might include:
+ - [Ambassador](https://www.getambassador.io/products/edge-stack/api-gateway)
+
+### Version 1.x
+
+#### Potential features
+
+- **Support for additional log collection agents**: \
+ \
+ Additional log collection agents might include:
+ - fluentbit
+ - Option to add a custom sidecar
+
+- **Additional ValidationWebhooks**: Proactively avoid unsupported configurations. \
+ \
+ ValidationWebhooks are already used to avoid accepting unsupported configurations,
+ e.g. deploying more than one primary Gerrit CustomResource per GerritCluster.
+ So far not all such cases are covered. Thus, the set of validations will be
+ further expanded.
+
+- **Better test coverage**: More tests are required to find bugs earlier.
+
+- **Automated reload of plugins**: Reload plugins on configuration change. \
+ \
+ Configuration changes in plugins typically don't require a restart of Gerrit,
+ but just to reload the plugin. To avoid unnecessary downtime of pods, the
+ Gerrit Operator will only reload affected plugins and not restart all pods, if
+ only the plugin's configuration changed.
+
+- **Externalized (re-)indexing**: Alleviate load caused by online reindexing. \
+ \
+ On large Gerrit sites online reindexing due to schema migrations `a)` or initialization `b)`
+ of a new site might take up to weeks and use a lot of resources, which might
+ cause performance issues. This is not acceptable in production. The current
+ plan to solve this issue is to implement a separate Gerrit deployment (GerritIndexer)
+ that is not exposed to clients and that takes over the task of online reindexing.
+ The GerritIndexer will mount the same repositories and will share events via
+ the high-availability plugin. However, it will access repositories in read-only
+ mode. \
+ This solves the above named scenarios as follows: \
+ \
+ a) **Schema migrations**: If a Gerrit update including a schema migration for
+ an index is applied, the Gerrit instances serving clients will be configured
+ to continue to use the old schema. Online reindexing will be disabled in
+ those instances. The GerritIndexer will have online reindexing enabled and
+ will start to build the new index version. As soon as it is finished, i.e.
+ it could start to use the new index version as read index, it will make a
+ copy of the new index and publish it, e.g. using a shared filesystem. A
+ restart of the Gerrit instances serving other clients will be triggered.
+ During this restart the new index will be copied into the site. Since there
+ may have been updated index entries since the new index version was published,
+ indexing of entries updated in the meantime will be triggered. \
+ \
+ b) **Initialization of a new site**: If Gerrit is horizontally scaled, it will
+ be started with an empty index, i.e. it has to build the complete index. To
+ avoid this, the GerritIndexer deployment will continuously keep a copy of the
+ indexes up-to-date. It will regularly be stopped and a copy of the index will
+ be stored in a shared volume. This can be used as a base for new instances, which
+ then only have to update index entries that were changed in the meantime.
+
+- **Autoscaling**: Automatically scale Gerrit deployments based on usage. \
+ \
+ Metrics like available workers in the thread pools could be used to decide to
+ scale the Gerrit deployment horizontally. This would allow to dynamically adapt
+ to the current load. This helps to save costs and resources.
+
+### Version 2.0
+
+#### Potential features
+
+- **Multi region support**: Support setups that are distributed over multiple regions. \
+ \
+ Supporting Gerrit installations that are distributed over multiple regions would
+ allow to serve clients all over the world without large differences in latency
+ and would also improve availability and reduce the risks of data loss. \
+ Such a setup could be achieved by using the [multi-site setup](https://gerrit.googlesource.com/plugins/multi-site/).
+
+- **Remove the dependency on shared storage**: Use completely independent sites
+ instead of sharing a filesystem for some site components. \
+ \
+ NFS and other shared filesystems potentially might cause performance issues on
+ larger Gerrit installations due to latencies. A potential solution might be
+ to use the [multi-site setup](https://gerrit.googlesource.com/plugins/multi-site/)
+ to separate the sites of all instances and to use events and replication to
+ share the state.
+
+- **Shared index**: Using an external centralized index, e.g. OpenSearch instead
+ of x copies of a Lucene index. \
+ \
+ Maintaining x copies of an index, where x is the number of Gerrit instances in
+ a GerritCluster, is unnecessarily expensive, since the same write transactions
+ have to be potentially done x times. Using a single centralized index would
+ resolve this issue.
+
+- **Shared cache**: Using an external centralized cache for all Gerrit instances. \
+ \
+ Using a single cache for all Gerrit instances will reduce the number of
+ computations for each Gerrit instance, since not every instance will have to
+ keep its own copy up-to-date.
+
+- **Sharding**: Shard a site based on repositories. \
+ \
+ Repositories served by a single GerritCluster might be quite diverse, e.g. ranging
+ from a few kilobytes to several gigabytes or repositories seeing high traffic
+ and others barely being fetched. It is not trivial to configure Gerrit to work
+ optimally for all repositories. Being able to shard at least the Gerrit Replicas
+ would help to optimally serve all repositories.
+
+## Helm charts
+
+Only limited support is planned for the `gerrit` and `gerrit-replica` helm-charts
+as soon as the Gerrit Operator reaches version 1.0. The reason is that the double
+maintenance of all features would not be feasible with the current number of
+contributors. The Gerrit Operator will support all features that are provided by
+the helm charts. If community members would like to adopt maintainership of the
+helm-charts, this would be very much appreciated and the helm-charts could then
+continue to be supported.
diff --git a/charts/k8s-gerrit/Jenkinsfile b/charts/k8s-gerrit/Jenkinsfile
new file mode 100644
index 0000000..c74570e
--- /dev/null
+++ b/charts/k8s-gerrit/Jenkinsfile
@@ -0,0 +1 @@
+k8sGerritPipeline()
diff --git a/charts/k8s-gerrit/LICENSE b/charts/k8s-gerrit/LICENSE
new file mode 100644
index 0000000..27bdfb6
--- /dev/null
+++ b/charts/k8s-gerrit/LICENSE
@@ -0,0 +1,317 @@
+
+```
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+```
+
+## Subcomponents
+
+This project includes the following subcomponents that are subject to separate
+license terms. Your use of these subcomponents is subject to the separate
+license terms applicable to each subcomponent.
+
+Docker-Py \
+https://github.com/docker/docker-py \
+Copyright 2016 Docker, Inc \
+Apache 2 license (https://github.com/docker/docker-py/blob/master/LICENSE)
+
+Kubernetes Python Client \
+https://github.com/kubernetes-client/python \
+Copyright (c) 2014 The Kubernetes Authors \
+Apache 2 license (https://github.com/kubernetes-client/python/blob/master/LICENSE)
+
+Passlib \
+https://bitbucket.org/ecollins/passlib/wiki/Home \
+Copyright (c) 2008-2017 Assurance Technologies, LLC.
+All rights reserved. \
+3-Clause BSD License (https://passlib.readthedocs.io/en/stable/copyright.html)
+
+PyGit2 \
+https://github.com/libgit2/pygit2 \
+pygit2 is Copyright (C) the pygit2 contributors unless otherwise stated. \
+GPL2 (https://github.com/libgit2/pygit2/blob/master/COPYING)
+
+pyOpenSSL \
+https://github.com/pyca/pyopenssl \
+Copyright (c) 2001 The pyOpenSSL developers \
+Apache 2 license (https://github.com/pyca/pyopenssl/blob/master/LICENSE)
+
+PyTest \
+https://github.com/pytest-dev/pytest \
+Copyright (c) 2004-2017 Holger Krekel and others \
+MIT License (https://github.com/pytest-dev/pytest/blob/master/LICENSE)
+
+python-chromedriver-autoinstaller \
+https://github.com/yeongbin-jo/python-chromedriver-autoinstaller \
+Copyright (c) 2022 Yeongbin Jo \
+MIT License (https://github.com/yeongbin-jo/python-chromedriver-autoinstaller/blob/master/LICENSE)
+
+Requests \
+https://github.com/requests/requests \
+Copyright 2018 Kenneth Reitz \
+Apache 2 license (https://github.com/requests/requests/blob/master/LICENSE)
+
+Selenium \
+https://github.com/SeleniumHQ/selenium \
+Copyright 2022 Software Freedom Conservancy (SFC) \
+Apache 2 license (https://github.com/SeleniumHQ/selenium/blob/trunk/LICENSE)
+
+Ambassador \
+https://github.com/emissary-ingress/emissary \
+Copyright 2021 Ambassador Labs \
+Apache 2 license (https://github.com/emissary-ingress/emissary/blob/master/LICENSE)
+
+---
+## The MIT License (MIT)
+
+```
+Copyright <YEAR> <COPYRIGHT HOLDER>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+```
+
+## 3-Clause BSD License
+
+```
+Copyright <YEAR> <COPYRIGHT HOLDER>
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+```
diff --git a/charts/k8s-gerrit/Pipfile b/charts/k8s-gerrit/Pipfile
new file mode 100644
index 0000000..18ace64
--- /dev/null
+++ b/charts/k8s-gerrit/Pipfile
@@ -0,0 +1,26 @@
+[[source]]
+name = "pypi"
+url = "https://pypi.org/simple"
+verify_ssl = true
+
+[dev-packages]
+pylint = "~=2.17.5"
+black = "~=23.7.0"
+
+[packages]
+docker = "~=6.1.3"
+pytest = "~=7.4.0"
+passlib = "~=1.7.4"
+pyopenssl = "~=23.2.0"
+requests = "~=2.31.0"
+pytest-timeout = "~=2.1.0"
+kubernetes = "~=27.2.0"
+pygit2 = "~=1.12.2"
+selenium = "~=4.11.2"
+chromedriver-autoinstaller = "==0.6.2"
+
+[requires]
+python_version = "3.11"
+
+[pipenv]
+allow_prereleases = true
diff --git a/charts/k8s-gerrit/Pipfile.lock b/charts/k8s-gerrit/Pipfile.lock
new file mode 100644
index 0000000..9678c9a
--- /dev/null
+++ b/charts/k8s-gerrit/Pipfile.lock
@@ -0,0 +1,834 @@
+{
+ "_meta": {
+ "hash": {
+ "sha256": "db93e37abb75873f53120e5f4871bead84d2c21f587da243a9d7729d4ed00a55"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.11"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "attrs": {
+ "hashes": [
+ "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04",
+ "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.1.0"
+ },
+ "cachetools": {
+ "hashes": [
+ "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590",
+ "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==5.3.1"
+ },
+ "certifi": {
+ "hashes": [
+ "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
+ "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2023.7.22"
+ },
+ "cffi": {
+ "hashes": [
+ "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5",
+ "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef",
+ "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104",
+ "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426",
+ "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405",
+ "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375",
+ "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a",
+ "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e",
+ "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc",
+ "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf",
+ "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185",
+ "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497",
+ "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3",
+ "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35",
+ "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c",
+ "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83",
+ "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21",
+ "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca",
+ "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984",
+ "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac",
+ "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd",
+ "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee",
+ "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a",
+ "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2",
+ "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192",
+ "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7",
+ "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585",
+ "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f",
+ "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e",
+ "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27",
+ "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b",
+ "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e",
+ "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e",
+ "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d",
+ "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c",
+ "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415",
+ "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82",
+ "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02",
+ "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314",
+ "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325",
+ "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c",
+ "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3",
+ "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914",
+ "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045",
+ "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d",
+ "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9",
+ "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5",
+ "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2",
+ "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c",
+ "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3",
+ "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2",
+ "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8",
+ "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d",
+ "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d",
+ "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9",
+ "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162",
+ "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76",
+ "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4",
+ "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e",
+ "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9",
+ "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6",
+ "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b",
+ "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01",
+ "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"
+ ],
+ "version": "==1.15.1"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96",
+ "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c",
+ "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710",
+ "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706",
+ "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020",
+ "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252",
+ "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad",
+ "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329",
+ "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a",
+ "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f",
+ "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6",
+ "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4",
+ "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a",
+ "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46",
+ "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2",
+ "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23",
+ "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace",
+ "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd",
+ "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982",
+ "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10",
+ "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2",
+ "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea",
+ "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09",
+ "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5",
+ "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149",
+ "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489",
+ "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9",
+ "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80",
+ "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592",
+ "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3",
+ "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6",
+ "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed",
+ "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c",
+ "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200",
+ "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a",
+ "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e",
+ "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d",
+ "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6",
+ "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623",
+ "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669",
+ "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3",
+ "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa",
+ "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9",
+ "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2",
+ "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f",
+ "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1",
+ "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4",
+ "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a",
+ "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8",
+ "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3",
+ "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029",
+ "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f",
+ "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959",
+ "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22",
+ "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7",
+ "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952",
+ "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346",
+ "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e",
+ "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d",
+ "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299",
+ "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd",
+ "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a",
+ "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3",
+ "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037",
+ "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94",
+ "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c",
+ "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858",
+ "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a",
+ "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449",
+ "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c",
+ "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918",
+ "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1",
+ "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c",
+ "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac",
+ "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.2.0"
+ },
+ "chromedriver-autoinstaller": {
+ "hashes": [
+ "sha256:7055e3e5a64e4352855fafab15d266e2ed325620222224fb261a2131e821dfe3",
+ "sha256:8ff5c715160b294c9e7cc0fae5ecc5ccaff5563ca1405daed6b959cca606e57c"
+ ],
+ "index": "pypi",
+ "version": "==0.6.2"
+ },
+ "cryptography": {
+ "hashes": [
+ "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306",
+ "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84",
+ "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47",
+ "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d",
+ "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116",
+ "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207",
+ "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81",
+ "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087",
+ "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd",
+ "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507",
+ "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858",
+ "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae",
+ "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34",
+ "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906",
+ "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd",
+ "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922",
+ "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7",
+ "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4",
+ "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574",
+ "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1",
+ "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c",
+ "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e",
+ "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==41.0.3"
+ },
+ "docker": {
+ "hashes": [
+ "sha256:aa6d17830045ba5ef0168d5eaa34d37beeb113948c413affe1d5991fc11f9a20",
+ "sha256:aecd2277b8bf8e506e484f6ab7aec39abe0038e29fa4a6d3ba86c3fe01844ed9"
+ ],
+ "index": "pypi",
+ "version": "==6.1.3"
+ },
+ "exceptiongroup": {
+ "hashes": [
+ "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9",
+ "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==1.1.3"
+ },
+ "google-auth": {
+ "hashes": [
+ "sha256:164cba9af4e6e4e40c3a4f90a1a6c12ee56f14c0b4868d1ca91b32826ab334ce",
+ "sha256:d61d1b40897407b574da67da1a833bdc10d5a11642566e506565d1b1a46ba873"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2.22.0"
+ },
+ "h11": {
+ "hashes": [
+ "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d",
+ "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.14.0"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
+ "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.4"
+ },
+ "iniconfig": {
+ "hashes": [
+ "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
+ "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.0.0"
+ },
+ "kubernetes": {
+ "hashes": [
+ "sha256:0f9376329c85cf07615ed6886bf9bf21eb1cbfc05e14ec7b0f74ed8153cd2815",
+ "sha256:d479931c6f37561dbfdf28fc5f46384b1cb8b28f9db344ed4a232ce91990825a"
+ ],
+ "index": "pypi",
+ "version": "==27.2.0"
+ },
+ "oauthlib": {
+ "hashes": [
+ "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca",
+ "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==3.2.2"
+ },
+ "outcome": {
+ "hashes": [
+ "sha256:6f82bd3de45da303cf1f771ecafa1633750a358436a8bb60e06a1ceb745d2672",
+ "sha256:c4ab89a56575d6d38a05aa16daeaa333109c1f96167aba8901ab18b6b5e0f7f5"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.2.0"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
+ "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.1"
+ },
+ "passlib": {
+ "hashes": [
+ "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1",
+ "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04"
+ ],
+ "index": "pypi",
+ "version": "==1.7.4"
+ },
+ "pluggy": {
+ "hashes": [
+ "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849",
+ "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.2.0"
+ },
+ "pyasn1": {
+ "hashes": [
+ "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57",
+ "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
+ "version": "==0.5.0"
+ },
+ "pyasn1-modules": {
+ "hashes": [
+ "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c",
+ "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
+ "version": "==0.3.0"
+ },
+ "pycparser": {
+ "hashes": [
+ "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9",
+ "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"
+ ],
+ "version": "==2.21"
+ },
+ "pygit2": {
+ "hashes": [
+ "sha256:14ae27491347a0ac4bbe8347b09d752cfe7fea1121c14525415e0cca6db4a836",
+ "sha256:214bd214784fcbef7a8494d1d59e0cd3a731c0d24ce0f230dcc843322ee33b08",
+ "sha256:22e7f3ad2b7b0c80be991bb47d8a2f2535cc9bf090746eb8679231ee565fde81",
+ "sha256:25a6548930328c5247bfb7c67d29104e63b036cb5390f032d9f91f63efb70434",
+ "sha256:336c864ac961e7be8ba06e9ed8c999e4f624a8ccd90121cc4e40956d8b57acac",
+ "sha256:546091316c9a8c37b9867ddcc6c9f7402ca4d0b9db3f349212a7b5e71988e359",
+ "sha256:56e85d0e66de957d599d1efb2409d39afeefd8f01009bfda0796b42a4b678358",
+ "sha256:5b3ab4d6302990f7adb2b015bcbda1f0715277008d0c66440497e6f8313bf9cb",
+ "sha256:5c1e26649e1540b6a774f812e2fc9890320ff4d33f16db1bb02626318b5ceae2",
+ "sha256:5f65483ab5e3563c58f60debe2acc0979fdf6fd633432fcfbddf727a9a265ba4",
+ "sha256:685378852ef8eb081333bc80dbdfc4f1333cf4a8f3baf614c4135e02ad1ee38a",
+ "sha256:6a4083ba093c69142e0400114a4ef75e87834637d2bbfd77b964614bf70f624f",
+ "sha256:79fbd99d3e08ca7478150eeba28ca4d4103f564148eab8d00aba8f1e6fc60654",
+ "sha256:7bb30ab1fdaa4c30821fed33892958b6d92d50dbd03c76f7775b4e5d62f53a2e",
+ "sha256:857c5cde635d470f58803d67bfb281dc4f6336065a0253bfbed001f18e2d0767",
+ "sha256:8bf14196cbfffbcd286f459a1d4fc660c5d5dfa8fb422e21216961df575410d6",
+ "sha256:8da8517809635ea3da950d9cf99c6d1851352d92b6db309382db88a01c3b0bfd",
+ "sha256:8f443d3641762b2bb9c76400bb18beb4ba27dd35bc098a8bfae82e6a190c52ab",
+ "sha256:926f2e48c4eaa179249d417b8382290b86b0f01dbf41d289f763576209276b9f",
+ "sha256:a365ffca23d910381749fdbcc367db52fe808f9aa4852914dd9ef8b711384a32",
+ "sha256:ac2b5f408eb882e79645ebb43039ac37739c3edd25d857cc97d7482a684b613f",
+ "sha256:b9c2359b99eed8e7fac30c06e6b4ae277a6a0537d6b4b88a190828c3d7eb9ef2",
+ "sha256:be3bb0139f464947523022a5af343a2e862c4ff250a57ec9f631449e7c0ba7c0",
+ "sha256:c74e7601cb8b8dc3d02fd32274e200a7761cffd20ee531442bf1fa115c8f99a5",
+ "sha256:cdf655e5f801990f5cad721b6ccbe7610962f0a4f1c20373dbf9c0be39374a81",
+ "sha256:e7e705aaecad85b883022e81e054fbd27d26023fc031618ee61c51516580517e",
+ "sha256:ec04c27be5d5af1ceecdcc0464e07081222f91f285f156dc53b23751d146569a",
+ "sha256:f4df3e5745fdf3111a6ccc905eae99f22f1a180728f714795138ca540cc2a50a",
+ "sha256:f8f813d35d836c5b0d1962c387754786bcc7f1c3c8e11207b9eeb30238ac4cc7",
+ "sha256:fb9eb57b75ce586928053692a25aae2a50fef3ad36661c57c07d4902899b1df3",
+ "sha256:fe35a72af61961dbb7fb4abcdaa36d5f1c85b2cd3daae94137eeb9c07215cdd3"
+ ],
+ "index": "pypi",
+ "version": "==1.12.2"
+ },
+ "pyopenssl": {
+ "hashes": [
+ "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2",
+ "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"
+ ],
+ "index": "pypi",
+ "version": "==23.2.0"
+ },
+ "pysocks": {
+ "hashes": [
+ "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299",
+ "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5",
+ "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"
+ ],
+ "version": "==1.7.1"
+ },
+ "pytest": {
+ "hashes": [
+ "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32",
+ "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"
+ ],
+ "index": "pypi",
+ "version": "==7.4.0"
+ },
+ "pytest-timeout": {
+ "hashes": [
+ "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9",
+ "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6"
+ ],
+ "index": "pypi",
+ "version": "==2.1.0"
+ },
+ "python-dateutil": {
+ "hashes": [
+ "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86",
+ "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==2.8.2"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc",
+ "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741",
+ "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206",
+ "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27",
+ "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595",
+ "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62",
+ "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98",
+ "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696",
+ "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d",
+ "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867",
+ "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47",
+ "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486",
+ "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6",
+ "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3",
+ "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007",
+ "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
+ "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c",
+ "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735",
+ "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
+ "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
+ "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
+ "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
+ "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
+ "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
+ "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0",
+ "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
+ "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c",
+ "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c",
+ "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924",
+ "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34",
+ "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
+ "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859",
+ "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673",
+ "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
+ "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab",
+ "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa",
+ "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c",
+ "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585",
+ "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d",
+ "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==6.0.1"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
+ ],
+ "index": "pypi",
+ "version": "==2.31.0"
+ },
+ "requests-oauthlib": {
+ "hashes": [
+ "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5",
+ "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==1.3.1"
+ },
+ "rsa": {
+ "hashes": [
+ "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7",
+ "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"
+ ],
+ "markers": "python_version >= '3.6' and python_version < '4'",
+ "version": "==4.9"
+ },
+ "selenium": {
+ "hashes": [
+ "sha256:98e72117b194b3fa9c69b48998f44bf7dd4152c7bd98544911a1753b9f03cc7d",
+ "sha256:9f9a5ed586280a3594f7461eb1d9dab3eac9d91e28572f365e9b98d9d03e02b5"
+ ],
+ "index": "pypi",
+ "version": "==4.11.2"
+ },
+ "six": {
+ "hashes": [
+ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
+ "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==1.16.0"
+ },
+ "sniffio": {
+ "hashes": [
+ "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101",
+ "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.3.0"
+ },
+ "sortedcontainers": {
+ "hashes": [
+ "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88",
+ "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"
+ ],
+ "version": "==2.4.0"
+ },
+ "tomli": {
+ "hashes": [
+ "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==2.0.1"
+ },
+ "trio": {
+ "hashes": [
+ "sha256:3887cf18c8bcc894433420305468388dac76932e9668afa1c49aa3806b6accb3",
+ "sha256:f43da357620e5872b3d940a2e3589aa251fd3f881b65a608d742e00809b1ec38"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.22.2"
+ },
+ "trio-websocket": {
+ "hashes": [
+ "sha256:1a748604ad906a7dcab9a43c6eb5681e37de4793ba0847ef0bc9486933ed027b",
+ "sha256:a9937d48e8132ebf833019efde2a52ca82d223a30a7ea3e8d60a7d28f75a4e3a"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.10.3"
+ },
+ "urllib3": {
+ "extras": [],
+ "hashes": [
+ "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f",
+ "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
+ "version": "==1.26.16"
+ },
+ "websocket-client": {
+ "hashes": [
+ "sha256:c951af98631d24f8df89ab1019fc365f2227c0892f12fd150e935607c79dd0dd",
+ "sha256:f1f9f2ad5291f0225a49efad77abf9e700b6fef553900623060dad6e26503b9d"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.6.1"
+ },
+ "wsproto": {
+ "hashes": [
+ "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065",
+ "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==1.2.0"
+ }
+ },
+ "develop": {
+ "astroid": {
+ "hashes": [
+ "sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c",
+ "sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd"
+ ],
+ "markers": "python_full_version >= '3.7.2'",
+ "version": "==2.15.6"
+ },
+ "black": {
+ "hashes": [
+ "sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3",
+ "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb",
+ "sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087",
+ "sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320",
+ "sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6",
+ "sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3",
+ "sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc",
+ "sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f",
+ "sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587",
+ "sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91",
+ "sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a",
+ "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad",
+ "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926",
+ "sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9",
+ "sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be",
+ "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd",
+ "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96",
+ "sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491",
+ "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2",
+ "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a",
+ "sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f",
+ "sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995"
+ ],
+ "index": "pypi",
+ "version": "==23.7.0"
+ },
+ "click": {
+ "hashes": [
+ "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd",
+ "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==8.1.6"
+ },
+ "dill": {
+ "hashes": [
+ "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e",
+ "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==0.3.7"
+ },
+ "isort": {
+ "hashes": [
+ "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504",
+ "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"
+ ],
+ "markers": "python_full_version >= '3.8.0'",
+ "version": "==5.12.0"
+ },
+ "lazy-object-proxy": {
+ "hashes": [
+ "sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382",
+ "sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82",
+ "sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9",
+ "sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494",
+ "sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46",
+ "sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30",
+ "sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63",
+ "sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4",
+ "sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae",
+ "sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be",
+ "sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701",
+ "sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd",
+ "sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006",
+ "sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a",
+ "sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586",
+ "sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8",
+ "sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821",
+ "sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07",
+ "sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b",
+ "sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171",
+ "sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b",
+ "sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2",
+ "sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7",
+ "sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4",
+ "sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8",
+ "sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e",
+ "sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f",
+ "sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda",
+ "sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4",
+ "sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e",
+ "sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671",
+ "sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11",
+ "sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455",
+ "sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734",
+ "sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb",
+ "sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.9.0"
+ },
+ "mccabe": {
+ "hashes": [
+ "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325",
+ "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==0.7.0"
+ },
+ "mypy-extensions": {
+ "hashes": [
+ "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
+ "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==1.0.0"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
+ "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.1"
+ },
+ "pathspec": {
+ "hashes": [
+ "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20",
+ "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.11.2"
+ },
+ "platformdirs": {
+ "hashes": [
+ "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d",
+ "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.10.0"
+ },
+ "pylint": {
+ "hashes": [
+ "sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413",
+ "sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252"
+ ],
+ "index": "pypi",
+ "version": "==2.17.5"
+ },
+ "tomli": {
+ "hashes": [
+ "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==2.0.1"
+ },
+ "tomlkit": {
+ "hashes": [
+ "sha256:38e1ff8edb991273ec9f6181244a6a391ac30e9f5098e7535640ea6be97a7c86",
+ "sha256:712cbd236609acc6a3e2e97253dfc52d4c2082982a88f61b640ecf0817eab899"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.12.1"
+ },
+ "typing-extensions": {
+ "hashes": [
+ "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36",
+ "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"
+ ],
+ "markers": "python_version < '3.10'",
+ "version": "==4.7.1"
+ },
+ "wrapt": {
+ "hashes": [
+ "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0",
+ "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420",
+ "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a",
+ "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c",
+ "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079",
+ "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923",
+ "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f",
+ "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1",
+ "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8",
+ "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86",
+ "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0",
+ "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364",
+ "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e",
+ "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c",
+ "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e",
+ "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c",
+ "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727",
+ "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff",
+ "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e",
+ "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29",
+ "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7",
+ "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72",
+ "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475",
+ "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a",
+ "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317",
+ "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2",
+ "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd",
+ "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640",
+ "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98",
+ "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248",
+ "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e",
+ "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d",
+ "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec",
+ "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1",
+ "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e",
+ "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9",
+ "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92",
+ "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb",
+ "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094",
+ "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46",
+ "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29",
+ "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd",
+ "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705",
+ "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8",
+ "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975",
+ "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb",
+ "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e",
+ "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b",
+ "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418",
+ "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019",
+ "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1",
+ "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba",
+ "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6",
+ "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2",
+ "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3",
+ "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7",
+ "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752",
+ "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416",
+ "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f",
+ "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1",
+ "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc",
+ "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145",
+ "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee",
+ "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a",
+ "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7",
+ "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b",
+ "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653",
+ "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0",
+ "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90",
+ "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29",
+ "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6",
+ "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034",
+ "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09",
+ "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559",
+ "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==1.15.0"
+ }
+ }
+}
diff --git a/charts/k8s-gerrit/README.md b/charts/k8s-gerrit/README.md
new file mode 100644
index 0000000..5c16b2e
--- /dev/null
+++ b/charts/k8s-gerrit/README.md
@@ -0,0 +1,292 @@
+# Gerrit Deployment on Kubernetes
+
+Container images, configurations, [helm charts](https://helm.sh/) and a Kubernetes
+Operator for installing [Gerrit](https://www.gerritcodereview.com/) on
+[Kubernetes](https://kubernetes.io/).
+
+# Deploying Gerrit on Kubernetes
+
+This project provides helm-charts to install Gerrit either as a primary instance
+or a replica on Kubernetes.
+
+The helm-charts are located in the `./helm-charts`-directory. Currently, the
+charts are not published in a registry and have to be deployed from local sources.
+
+For a detailed guide of how to install the helm-charts refer to the respective
+READMEs in the helm-charts directories:
+
+- [gerrit](helm-charts/gerrit/README.md)
+- [gerrit-replica](helm-charts/gerrit-replica/README.md)
+
+These READMEs detail the prerequisites required by the charts as well as all
+configuration options currently provided by the charts.
+
+To evaluate and test the helm-charts, they can be installed on a local machine
+running Minikube. Follow this [guide](Documentation/minikube.md) to get a detailed
+description of how to set up the Minikube cluster and install the charts.
+
+Alternatively, a Gerrit Operator can be used to install and operate Gerrit in a
+Kubernetes cluster. The [documentation](./Documentation/operator.md) describes
+how to build and deploy the Gerrit Operator and how to use it to install Gerrit.
+
+# Docker images
+
+This project provides the sources for docker images used by the helm-charts.
+The images are also provided on [Dockerhub](https://hub.docker.com/u/k8sgerrit).
+
+The project also provides scripts to build and publish the images so that custom
+versions can be used by the helm-charts. This requires however a docker registry
+that can be accessed from the Kubernetes cluster, on which Gerrit will be
+deployed. The functionality of the scripts is described in the following
+subsections.
+
+## Building images
+
+To build all images, the `build`-script in the root directory of the project can
+be used:
+
+```
+./build
+```
+
+If a specific image should be built, the image name can be specified as an argument.
+Multiple images can be specified at once:
+
+```
+./build gerrit git-gc
+```
+
+The build-script usually uses the `latest`-tag to tag the images. By using the
+`--tag TAG`-option, a custom tag can be defined:
+
+```
+./build --tag test
+```
+
+The version of Gerrit built into the images can be changed by providing a download
+URL for a `.war`-file containing Gerrit:
+
+```
+./build --gerrit-url https://example.com/gerrit.war
+```
+
+The version of a health-check plugin built into the images can be changed by
+providing a download URL for a `.jar`-file containing the plugin:
+
+```
+./build --healthcheck-jar-url https://example.com/healthcheck.jar
+```
+
+The build script will in addition tag the image with the output of
+`git describe --dirty`.
+
+The single component images inherit a base image. The `Dockerfile` for the base
+image can be found in the `./base`-directory. It will be
+automatically built by the `./build`-script. If the component images are built
+manually, the base image has to be built first with the target
+`base:latest`, since it is not available in a registry and thus has
+to exist locally.
+
+## Publishing images
+
+The publish script in the root directory of the project can be used to push the
+built images to the configured registry. To do so, log in first, before executing
+the script.
+
+```
+docker login <registry>
+```
+
+To configure the registry and image version, the respective values can be
+configured via env variables `REGISTRY` and `TAG`. In addition, these values can
+also be passed as command line options named `--registry` and `--tag` in which
+case they override the values from env variables:
+
+```
+./publish <component-name>
+```
+
+The `<component-name>` is one of: `apache-git-http-backend`, `git-gc`, `gerrit`
+or `gerrit-init`.
+
+Adding the `--update-latest`-flag will also update the images tagged `latest` in
+the repository:
+
+```
+./publish --update-latest <component-name>
+```
+
+## Running images in Docker
+
+The container images are meant to be used by the helm-charts provided in this
+project. The images are thus not designed to be used in a standalone setup. To
+run Gerrit on Docker use the
+[docker-gerrit](https://gerrit-review.googlesource.com/admin/repos/docker-gerrit)
+project.
+
+# Running tests
+
+The tests are implemented using Python and `pytest`. To ensure a well-defined
+test-environment, `pipenv` is meant to be used to install packages and provide a
+virtual environment in which to run the tests. To install pipenv, use `brew`:
+
+```sh
+brew install pipenv
+```
+
+More detailed information can be found in the
+[pipenv GitHub repo](https://github.com/pypa/pipenv).
+
+To create the virtual environment with all required packages, run:
+
+```sh
+pipenv install
+```
+
+To run all tests, execute:
+
+```sh
+pipenv run pytest -m "not smoke"
+```
+
+***note
+The `-m "not smoke"`-option excludes the smoke tests, which will fail, since
+no Gerrit-instance will be running, when they are executed.
+***
+
+Some tests will need to create files in a temporary directory. Some of these
+files will be mounted into docker containers by tests. For this to work make
+either sure that the system temporary directory is accessible by the Docker
+daemon or set the base temporary directory to a directory accessible by Docker
+by executing:
+
+```sh
+pipenv run pytest --basetemp=/tmp/k8sgerrit -m "not smoke"
+```
+
+By default the tests will build all images from scratch. This will greatly
+increase the time needed for testing. To use already existing container images,
+a tag can be provided as follows:
+
+```sh
+pipenv run pytest --tag=v0.1 -m "not smoke"
+```
+
+The tests will then use the existing images with the provided tag. If an image
+does not exist, it will still be built by the tests.
+
+By default the build of the container images will not use the build cache
+created by docker. To enable the cache, execute:
+
+```sh
+pipenv run pytest --build-cache -m "not smoke"
+```
+
+Slow tests may be marked with the decorator `@pytest.mark.slow`. These tests
+may then be skipped as follows:
+
+```sh
+pipenv run pytest --skip-slow -m "not smoke"
+```
+
+There are also other marks, allowing to select tests (refer to
+[this section](#test-marks)).
+
+To run specific tests, execute one of the following:
+
+```sh
+# Run all tests in a directory (including subdirectories)
+pipenv run pytest tests/container-images/base
+
+# Run all tests in a file
+pipenv run pytest tests/container-images/base/test_container_build_base.py
+
+# Run a specific test
+pipenv run \
+ pytest tests/container-images/base/test_container_build_base.py::test_build_base
+
+# Run tests with a specific marker
+pipenv run pytest -m "docker"
+```
+
+For a more detailed description of how to use `pytest`, refer to the
+[official documentation](https://docs.pytest.org/en/latest/contents.html).
+
+## Test marks
+
+### docker
+
+Marks tests which start up docker containers. These tests will interact with
+the containers by either using `docker exec` or sending HTTP-requests. Make
+sure that your system supports this kind of interaction.
+
+### incremental
+
+Marks test classes in which the contained test functions have to run
+incrementally.
+
+### integration
+
+Marks integration tests. These tests test interactions between containers,
+between outside clients and containers and between the components installed
+by a helm chart.
+
+### kubernetes
+
+Marks tests that require a Kubernetes cluster. These tests are used to test the
+functionality of the helm charts in this project and the interaction of the
+components installed by them. The cluster should not be used for other purposes
+to minimize unforeseen interactions.
+
+These tests require a storage class with ReadWriteMany access mode within the
+cluster. The name of the storage class has to be provided with the
+`--rwm-storageclass`-option (default: `shared-storage`).
+
+### slow
+
+Marks tests that need an above average time to run.
+
+### structure
+
+Marks structure tests. These tests are meant to test whether certain components
+exist in a container. These tests ensure that components expected by the users
+of the container, e.g. the helm charts, are present in the containers.
+
+## Running smoke tests
+
+To run smoke tests, use the following command:
+
+```sh
+pipenv run pytest \
+ -m "smoke" \
+ --basetemp="<tmp-dir for tests>" \
+ --ingress-url="<Gerrit URL>" \
+ --gerrit-user="<Gerrit user>" \
+ --gerrit-pwd
+```
+
+The smoke tests require a Gerrit user that is allowed to create and delete
+projects. The username has to be given by `--gerrit-user`. Setting the
+`--gerrit-pwd`-flag will cause a password prompt to enter the password of the
+Gerrit-user.
+
+# Contributing
+
+Contributions to this project are welcome. If you are new to the Gerrit workflow,
+refer to the [Gerrit-documentation](https://gerrit-review.googlesource.com/Documentation/intro-gerrit-walkthrough.html)
+for guidance on how to contribute changes.
+
+The contribution guidelines for this project can be found
+[here](Documentation/developer-guide.md).
+
+# Roadmap
+
+The roadmap of this project can be found [here](Documentation/roadmap.md).
+
+Feature requests can be made by pushing a change for the roadmap. This can also
+be done to announce/discuss features that you would like to provide.
+
+# Contact
+
+The [Gerrit Mailing List](https://groups.google.com/forum/#!forum/repo-discuss)
+can be used to post questions and comments on this project or Gerrit in general.
diff --git a/charts/k8s-gerrit/build b/charts/k8s-gerrit/build
new file mode 100755
index 0000000..aa8de36
--- /dev/null
+++ b/charts/k8s-gerrit/build
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+usage() {
+ me=`basename "$0"`
+ echo >&2 "Usage: $me [--help] [--tag TAG] [--gerrit-url URL] [--base-image IMAGE] [IMAGE]"
+ exit 1
+}
+
+while test $# -gt 0 ; do
+ case "$1" in
+ --help)
+ usage
+ ;;
+
+ --tag)
+ shift
+ TAG=$1
+ shift
+ ;;
+
+ --gerrit-url)
+ shift
+ GERRIT_WAR_URL=$1
+ shift
+ ;;
+
+ --healthcheck-jar-url)
+ shift
+ HEALTHCHECK_JAR_URL=$1
+ shift
+ ;;
+
+ --base-image)
+ shift
+ BASE_IMAGE=$1
+ shift
+ ;;
+
+ *)
+ break
+ esac
+done
+
+#Get list of images
+source container-images/publish_list
+IMAGES=$(get_image_list)
+
+if test -n "$GERRIT_WAR_URL"; then
+ BUILD_ARGS="--build-arg GERRIT_WAR_URL=$GERRIT_WAR_URL"
+fi
+
+if test -n "$HEALTHCHECK_JAR_URL"; then
+ BUILD_ARGS="$BUILD_ARGS --build-arg HEALTHCHECK_JAR_URL=$HEALTHCHECK_JAR_URL"
+fi
+
+export REV="$(git describe --always --dirty)"
+
+docker_build(){
+ IMAGE=$1
+
+ docker build \
+ --platform=linux/amd64 \
+ --build-arg TAG=$REV \
+ -t k8sgerrit/$IMAGE:$TAG \
+ ./container-images/$IMAGE
+
+ if test $? -ne 0; then
+ REPORT="$REPORT Failed: k8sgerrit/$IMAGE.\n"
+ RETURN_CODE=1
+ else
+ REPORT="$REPORT Success: k8sgerrit/$IMAGE:$TAG\n"
+ fi
+}
+
+docker_build_gerrit_base(){
+ BUILD_ARGS="$BUILD_ARGS --build-arg TAG=$REV"
+ docker build \
+ --platform=linux/amd64 \
+ $BUILD_ARGS \
+ -t gerrit-base:$REV \
+ ./container-images/gerrit-base
+ if test $? -ne 0; then
+ echo -e "\n\nFailed to build gerrit-base image."
+ exit 1
+ fi
+
+ if test -z "$TAG"; then
+ export TAG="$(./get_version.sh)"
+ fi
+}
+
+REPORT="Build results: \n"
+RETURN_CODE=0
+
+if test -n "$BASE_IMAGE"; then
+ BASE_BUILD_ARGS="--build-arg BASE_IMAGE=$BASE_IMAGE"
+fi
+
+docker build $BASE_BUILD_ARGS --platform=linux/amd64 -t base:$REV ./container-images/base
+if test $? -ne 0; then
+ echo -e "\n\nFailed to build base image."
+ exit 1
+fi
+
+if test $# -eq 0 ; then
+ docker_build_gerrit_base
+ for IMAGE in $IMAGES; do
+ docker_build $IMAGE
+ done
+else
+ while test $# -gt 0 ; do
+ if [[ $1 = gerrit-* ]]; then
+ docker_build_gerrit_base
+ else
+ if test -z "$TAG"; then
+ TAG="$(git describe --always --dirty)-unknown"
+ fi
+ echo -e "\nNo Image containing Gerrit will be built." \
+ "The Gerrit-version can thus not be determined." \
+ "Using tag $TAG\n"
+ fi
+ docker_build $1
+ shift
+ done
+fi
+
+echo -e "\n\n$REPORT"
+exit $RETURN_CODE
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/Dockerfile b/charts/k8s-gerrit/container-images/apache-git-http-backend/Dockerfile
new file mode 100644
index 0000000..aa6c6c9
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/Dockerfile
@@ -0,0 +1,30 @@
+ARG TAG=latest
+FROM base:${TAG}
+
+# Install apache2
+RUN apk update && \
+ apk add --no-cache \
+ apache2 \
+ apache2-ctl \
+ apache2-utils \
+ git-daemon \
+ logrotate && \
+ rm /etc/apache2/conf.d/default.conf && \
+ rm /etc/apache2/conf.d/info.conf
+
+# Configure git-http-backend
+COPY config/git-http-backend.conf /etc/apache2/conf.d/
+COPY config/envvars /usr/sbin/envvars
+COPY config/httpd.conf /etc/apache2/httpd.conf
+COPY config/logrotation /etc/logrotate.d/apache2
+
+COPY tools/start /var/tools/start
+COPY tools/project_admin.sh /var/cgi/project_admin.sh
+
+RUN mkdir -p /var/gerrit/git && \
+ mkdir -p /var/log/apache2 && \
+ chown -R gerrit:users /var/gerrit/git && \
+ chown -R gerrit:users /var/log/apache2
+
+# Start
+ENTRYPOINT ["ash", "/var/tools/start"]
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/README.md b/charts/k8s-gerrit/container-images/apache-git-http-backend/README.md
new file mode 100644
index 0000000..0b7146c
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/README.md
@@ -0,0 +1,26 @@
+# apache-git-http-backend
+
+The apache-git-http-backend docker image serves as receiver in git replication
+from a Gerrit to a Gerrit replica.
+
+## Content
+
+* base image
+* Apache webserver
+* Apache configurations for http
+* git (via base image) and git-daemon for git-http-backend
+* `tools/project_admin.sh`: cgi script to enable remote creation/deletion/HEAD update
+ of git repositories. Compatible with replication plugin.
+* `tools/start`: start script, configures and starts Apache
+ webserver
+
+## Setup and Configuration
+
+* install Apache webserver, additional Apache tools and git daemon
+* configure Apache
+* install cgi scripts
+* map volumes
+
+## Start
+
+* start Apache git-http backend via start script `/var/tools/start`
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/config/envvars b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/envvars
new file mode 100644
index 0000000..97d9f7e
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/envvars
@@ -0,0 +1,43 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# envvars-std - default environment variables for apachectl
+#
+# This file is generated from envvars-std.in
+#
+if test "x$LD_LIBRARY_PATH" != "x" ; then
+ LD_LIBRARY_PATH="/usr/lib:$LD_LIBRARY_PATH"
+else
+ LD_LIBRARY_PATH="/usr/lib"
+fi
+export LD_LIBRARY_PATH
+#
+
+# this won't be correct after changing uid
+unset HOME
+
+# Since there is no sane way to get the parsed apache2 config in scripts, some
+# settings are defined via environment variables and then used in apache2ctl,
+# /etc/init.d/apache2, /etc/logrotate.d/apache2, etc.
+export APACHE_RUN_USER=gerrit
+export APACHE_RUN_GROUP=users
+# Only /var/log/apache2 is handled by /etc/logrotate.d/apache2.
+export APACHE_LOG_DIR=/var/log/apache2$SUFFIX
+
+## Uncomment the following line to use the system default locale instead:
+#. /etc/default/locale
+
+export LANG
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/config/git-http-backend.conf b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/git-http-backend.conf
new file mode 100644
index 0000000..8967bb1
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/git-http-backend.conf
@@ -0,0 +1,58 @@
+<VirtualHost *:80>
+ # The ServerName directive sets the request scheme, hostname and port that
+ # the server uses to identify itself. This is used when creating
+ # redirection URLs. In the context of virtual hosts, the ServerName
+ # specifies what hostname must appear in the request's Host: header to
+ # match this virtual host. For the default virtual host (this file) this
+ # value is not decisive as it is used as a last resort host regardless.
+ # However, you must set it for any further virtual host explicitly.
+ ServerName localhost
+ ServerAdmin webmaster@localhost
+
+ UseCanonicalName On
+
+ AllowEncodedSlashes On
+
+ SetEnv GIT_PROJECT_ROOT /var/gerrit/git
+ SetEnv GIT_HTTP_EXPORT_ALL
+ ScriptAliasMatch "(?i)^/a/projects/(.*)" "/var/cgi/project_admin.sh"
+ ScriptAlias / /usr/libexec/git-core/git-http-backend/
+ ScriptLog logs/cgi.log
+
+ # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,
+ # error, crit, alert, emerg.
+ # It is also possible to configure the loglevel for particular
+ # modules, e.g.
+ LogLevel debug authz_core:warn
+
+ # Don't log probe requests performed by kubernetes
+ SetEnvIFNoCase User-Agent "^kube-probe" dontlog
+
+ ErrorLog ${APACHE_LOG_DIR}/error.log
+ CustomLog ${APACHE_LOG_DIR}/access.log combined env=!dontlog
+
+ # For most configuration files from conf-available/, which are
+ # enabled or disabled at a global level, it is possible to
+ # include a line for only one particular virtual host. For example the
+ # following line enables the CGI configuration for this host only
+ # after it has been globally disabled with "a2disconf".
+ #Include conf-available/serve-cgi-bin.conf
+ <Files "git-http-backend">
+ AuthType Basic
+ AuthName "Restricted Content"
+ AuthUserFile /var/apache/credentials/.htpasswd
+ Require valid-user
+ </Files>
+ <Files "create_repo.sh">
+ AuthType Basic
+ AuthName "Restricted Content"
+ AuthUserFile /var/apache/credentials/.htpasswd
+ Require valid-user
+ </Files>
+ <Files "project_admin.sh">
+ AuthType Basic
+ AuthName "Restricted Content"
+ AuthUserFile /var/apache/credentials/.htpasswd
+ Require valid-user
+ </Files>
+</VirtualHost>
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/config/httpd.conf b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/httpd.conf
new file mode 100644
index 0000000..7a28460
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/httpd.conf
@@ -0,0 +1,510 @@
+# This is the main Apache HTTP server configuration file. It contains the
+# configuration directives that give the server its instructions.
+# See <URL:http://httpd.apache.org/docs/2.4/> for detailed information.
+# In particular, see
+# <URL:http://httpd.apache.org/docs/2.4/mod/directives.html>
+# for a discussion of each configuration directive.
+#
+# Do NOT simply read the instructions in here without understanding
+# what they do. They're here only as hints or reminders. If you are unsure
+# consult the online docs. You have been warned.
+#
+# Configuration and logfile names: If the filenames you specify for many
+# of the server's control files begin with "/" (or "drive:/" for Win32), the
+# server will use that explicit path. If the filenames do *not* begin
+# with "/", the value of ServerRoot is prepended -- so "logs/access_log"
+# with ServerRoot set to "/usr/local/apache2" will be interpreted by the
+# server as "/usr/local/apache2/logs/access_log", whereas "/logs/access_log"
+# will be interpreted as '/logs/access_log'.
+
+#
+# ServerTokens
+# This directive configures what you return as the Server HTTP response
+# Header. The default is 'Full' which sends information about the OS-Type
+# and compiled in modules.
+# Set to one of: Full | OS | Minor | Minimal | Major | Prod
+# where Full conveys the most information, and Prod the least.
+#
+ServerTokens OS
+
+#
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# Do not add a slash at the end of the directory path. If you point
+# ServerRoot at a non-local disk, be sure to specify a local disk on the
+# Mutex directive, if file-based mutexes are used. If you wish to share the
+# same ServerRoot for multiple httpd daemons, you will need to change at
+# least PidFile.
+#
+ServerRoot /var/www
+
+#
+# Mutex: Allows you to set the mutex mechanism and mutex file directory
+# for individual mutexes, or change the global defaults
+#
+# Uncomment and change the directory if mutexes are file-based and the default
+# mutex file directory is not on a local disk or is not appropriate for some
+# other reason.
+#
+# Mutex default:/run/apache2
+
+#
+# Listen: Allows you to bind Apache to specific IP addresses and/or
+# ports, instead of the default. See also the <VirtualHost>
+# directive.
+#
+# Change this to Listen on specific IP addresses as shown below to
+# prevent Apache from glomming onto all bound IP addresses.
+#
+#Listen 12.34.56.78:80
+Listen 80
+
+#
+# Dynamic Shared Object (DSO) Support
+#
+# To be able to use the functionality of a module which was built as a DSO you
+# have to place corresponding `LoadModule' lines at this location so the
+# directives contained in it are actually available _before_ they are used.
+# Statically compiled modules (those listed by `httpd -l') do not need
+# to be loaded here.
+#
+# Example:
+# LoadModule foo_module modules/mod_foo.so
+#
+LoadModule mpm_event_module modules/mod_mpm_event.so
+#LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
+#LoadModule mpm_worker_module modules/mod_mpm_worker.so
+LoadModule authn_file_module modules/mod_authn_file.so
+#LoadModule authn_dbm_module modules/mod_authn_dbm.so
+#LoadModule authn_anon_module modules/mod_authn_anon.so
+#LoadModule authn_dbd_module modules/mod_authn_dbd.so
+#LoadModule authn_socache_module modules/mod_authn_socache.so
+LoadModule authn_core_module modules/mod_authn_core.so
+LoadModule authz_host_module modules/mod_authz_host.so
+LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
+LoadModule authz_user_module modules/mod_authz_user.so
+#LoadModule authz_dbm_module modules/mod_authz_dbm.so
+#LoadModule authz_owner_module modules/mod_authz_owner.so
+#LoadModule authz_dbd_module modules/mod_authz_dbd.so
+LoadModule authz_core_module modules/mod_authz_core.so
+LoadModule access_compat_module modules/mod_access_compat.so
+LoadModule auth_basic_module modules/mod_auth_basic.so
+#LoadModule auth_form_module modules/mod_auth_form.so
+#LoadModule auth_digest_module modules/mod_auth_digest.so
+#LoadModule allowmethods_module modules/mod_allowmethods.so
+#LoadModule file_cache_module modules/mod_file_cache.so
+#LoadModule cache_module modules/mod_cache.so
+#LoadModule cache_disk_module modules/mod_cache_disk.so
+#LoadModule cache_socache_module modules/mod_cache_socache.so
+#LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
+#LoadModule socache_dbm_module modules/mod_socache_dbm.so
+#LoadModule socache_memcache_module modules/mod_socache_memcache.so
+#LoadModule socache_redis_module modules/mod_socache_redis.so
+#LoadModule watchdog_module modules/mod_watchdog.so
+#LoadModule macro_module modules/mod_macro.so
+#LoadModule dbd_module modules/mod_dbd.so
+#LoadModule dumpio_module modules/mod_dumpio.so
+#LoadModule echo_module modules/mod_echo.so
+#LoadModule buffer_module modules/mod_buffer.so
+#LoadModule data_module modules/mod_data.so
+#LoadModule ratelimit_module modules/mod_ratelimit.so
+LoadModule reqtimeout_module modules/mod_reqtimeout.so
+#LoadModule ext_filter_module modules/mod_ext_filter.so
+#LoadModule request_module modules/mod_request.so
+#LoadModule include_module modules/mod_include.so
+LoadModule filter_module modules/mod_filter.so
+#LoadModule reflector_module modules/mod_reflector.so
+#LoadModule substitute_module modules/mod_substitute.so
+#LoadModule sed_module modules/mod_sed.so
+#LoadModule charset_lite_module modules/mod_charset_lite.so
+#LoadModule deflate_module modules/mod_deflate.so
+LoadModule mime_module modules/mod_mime.so
+LoadModule log_config_module modules/mod_log_config.so
+#LoadModule log_debug_module modules/mod_log_debug.so
+#LoadModule log_forensic_module modules/mod_log_forensic.so
+#LoadModule logio_module modules/mod_logio.so
+LoadModule env_module modules/mod_env.so
+#LoadModule mime_magic_module modules/mod_mime_magic.so
+#LoadModule expires_module modules/mod_expires.so
+LoadModule headers_module modules/mod_headers.so
+#LoadModule usertrack_module modules/mod_usertrack.so
+#LoadModule unique_id_module modules/mod_unique_id.so
+LoadModule setenvif_module modules/mod_setenvif.so
+LoadModule version_module modules/mod_version.so
+#LoadModule remoteip_module modules/mod_remoteip.so
+#LoadModule session_module modules/mod_session.so
+#LoadModule session_cookie_module modules/mod_session_cookie.so
+#LoadModule session_crypto_module modules/mod_session_crypto.so
+#LoadModule session_dbd_module modules/mod_session_dbd.so
+#LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
+#LoadModule slotmem_plain_module modules/mod_slotmem_plain.so
+#LoadModule dialup_module modules/mod_dialup.so
+#LoadModule http2_module modules/mod_http2.so
+LoadModule unixd_module modules/mod_unixd.so
+#LoadModule heartbeat_module modules/mod_heartbeat.so
+#LoadModule heartmonitor_module modules/mod_heartmonitor.so
+LoadModule status_module modules/mod_status.so
+LoadModule autoindex_module modules/mod_autoindex.so
+#LoadModule asis_module modules/mod_asis.so
+#LoadModule info_module modules/mod_info.so
+#LoadModule suexec_module modules/mod_suexec.so
+<IfModule !mpm_prefork_module>
+ LoadModule cgid_module modules/mod_cgid.so
+</IfModule>
+<IfModule mpm_prefork_module>
+ LoadModule cgi_module modules/mod_cgi.so
+</IfModule>
+#LoadModule vhost_alias_module modules/mod_vhost_alias.so
+#LoadModule negotiation_module modules/mod_negotiation.so
+LoadModule dir_module modules/mod_dir.so
+#LoadModule actions_module modules/mod_actions.so
+#LoadModule speling_module modules/mod_speling.so
+#LoadModule userdir_module modules/mod_userdir.so
+LoadModule alias_module modules/mod_alias.so
+#LoadModule rewrite_module modules/mod_rewrite.so
+LoadModule info_module modules/mod_info.so
+
+LoadModule negotiation_module modules/mod_negotiation.so
+
+<IfModule unixd_module>
+#
+# If you wish httpd to run as a different user or group, you must run
+# httpd as root initially and it will switch.
+#
+# User/Group: The name (or #number) of the user/group to run httpd as.
+# It is usually good practice to create a dedicated user and group for
+# running httpd, as with most system services.
+#
+User ${APACHE_RUN_USER}
+Group ${APACHE_RUN_GROUP}
+
+#
+# Timeout defines, in seconds, the amount of time that the server waits for
+# receipts and transmissions during communications. Timeout is set to 300
+# seconds by default, which is appropriate for most situations.
+#
+Timeout 300
+
+</IfModule>
+
+# 'Main' server configuration
+#
+# The directives in this section set up the values used by the 'main'
+# server, which responds to any requests that aren't handled by a
+# <VirtualHost> definition. These values also provide defaults for
+# any <VirtualHost> containers you may define later in the file.
+#
+# All of these directives may appear inside <VirtualHost> containers,
+# in which case these default settings will be overridden for the
+# virtual host being defined.
+#
+
+#
+# ServerAdmin: Your address, where problems with the server should be
+# e-mailed. This address appears on some server-generated pages, such
+# as error documents. e.g. admin@your-domain.com
+#
+# ServerAdmin you@example.com
+
+#
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (internal error documents, FTP directory
+# listings, mod_status and mod_info output etc., but not CGI generated
+# documents or custom error documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+#
+ServerSignature On
+
+#
+# ServerName gives the name and port that the server uses to identify itself.
+# This can often be determined automatically, but we recommend you specify
+# it explicitly to prevent problems during startup.
+#
+# If your host doesn't have a registered DNS name, enter its IP address here.
+#
+#ServerName www.example.com:80
+
+#
+# Deny access to the entirety of your server's filesystem. You must
+# explicitly permit access to web content directories in other
+# <Directory> blocks below.
+#
+<Directory />
+ AllowOverride none
+ Require all denied
+</Directory>
+
+#
+# Note that from this point forward you must specifically allow
+# particular features to be enabled - so if something's not working as
+# you might expect, make sure that you have specifically enabled it
+# below.
+#
+
+#
+# DocumentRoot: The directory out of which you will serve your
+# documents. By default, all requests are taken from this directory, but
+# symbolic links and aliases may be used to point to other locations.
+#
+DocumentRoot "/var/www/localhost/htdocs"
+<Directory "/var/www/localhost/htdocs">
+ #
+ # Possible values for the Options directive are "None", "All",
+ # or any combination of:
+ # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews
+ #
+ # Note that "MultiViews" must be named *explicitly* --- "Options All"
+ # doesn't give it to you.
+ #
+ # The Options directive is both complicated and important. Please see
+ # http://httpd.apache.org/docs/2.4/mod/core.html#options
+ # for more information.
+ #
+ Options Indexes FollowSymLinks ExecCGI
+
+ #
+ # AllowOverride controls what directives may be placed in .htaccess files.
+ # It can be "All", "None", or any combination of the keywords:
+ # AllowOverride FileInfo AuthConfig Limit
+ #
+ AllowOverride None
+
+ #
+ # Controls who can get stuff from this server.
+ #
+ Require all granted
+</Directory>
+
+#
+# DirectoryIndex: sets the file that Apache will serve if a directory
+# is requested.
+#
+<IfModule dir_module>
+ DirectoryIndex index.html
+</IfModule>
+
+#
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+#
+<Files ".ht*">
+ Require all denied
+</Files>
+
+#
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here. If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+#
+ErrorLog ${APACHE_LOG_DIR}/error.log
+
+#
+# LogLevel: Control the number of messages logged to the error_log.
+# Possible values include: debug, info, notice, warn, error, crit,
+# alert, emerg.
+#
+LogLevel debug authz_core:warn
+
+<IfModule log_config_module>
+ #
+ # The following directives define some format nicknames for use with
+ # a CustomLog directive (see below).
+ #
+ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
+ LogFormat "%h %l %u %t \"%r\" %>s %b" common
+
+ <IfModule logio_module>
+ # You need to enable mod_logio.c to use %I and %O
+ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
+ </IfModule>
+
+ #
+ # The location and format of the access logfile (Common Logfile Format).
+ # If you do not define any access logfiles within a <VirtualHost>
+ # container, they will be logged here. Contrariwise, if you *do*
+ # define per-<VirtualHost> access logfiles, transactions will be
+ # logged therein and *not* in this file.
+ #
+ #CustomLog logs/access.log common
+
+ #
+ # If you prefer a logfile with access, agent, and referer information
+ # (Combined Logfile Format) you can use the following directive.
+ #
+ CustomLog logs/access.log combined
+</IfModule>
+
+<IfModule alias_module>
+ #
+ # Redirect: Allows you to tell clients about documents that used to
+ # exist in your server's namespace, but do not anymore. The client
+ # will make a new request for the document at its new location.
+ # Example:
+ # Redirect permanent /foo http://www.example.com/bar
+
+ #
+ # Alias: Maps web paths into filesystem paths and is used to
+ # access content that does not live under the DocumentRoot.
+ # Example:
+ # Alias /webpath /full/filesystem/path
+ #
+ # If you include a trailing / on /webpath then the server will
+ # require it to be present in the URL. You will also likely
+ # need to provide a <Directory> section to allow access to
+ # the filesystem path.
+
+ #
+ # ScriptAlias: This controls which directories contain server scripts.
+ # ScriptAliases are essentially the same as Aliases, except that
+ # documents in the target directory are treated as applications and
+ # run by the server when requested rather than as documents sent to the
+ # client. The same rules about trailing "/" apply to ScriptAlias
+ # directives as to Alias.
+ #
+ ScriptAlias /cgi-bin/ "/var/www/localhost/cgi-bin/"
+
+</IfModule>
+
+<IfModule cgid_module>
+ #
+ # ScriptSock: On threaded servers, designate the path to the UNIX
+ # socket used to communicate with the CGI daemon of mod_cgid.
+ #
+ Scriptsock cgisock
+</IfModule>
+
+#
+# "/var/www/localhost/cgi-bin" should be changed to whatever your ScriptAliased
+# CGI directory exists, if you have that configured.
+#
+<Directory "/var/cgi">
+ AllowOverride None
+ Options None
+ Require all granted
+</Directory>
+
+<IfModule headers_module>
+ #
+ # Avoid passing HTTP_PROXY environment to CGI's on this or any proxied
+ # backend servers which have lingering "httpoxy" defects.
+ # 'Proxy' request header is undefined by the IETF, not listed by IANA
+ #
+ RequestHeader unset Proxy early
+</IfModule>
+
+<IfModule mime_module>
+ #
+ # TypesConfig points to the file containing the list of mappings from
+ # filename extension to MIME-type.
+ #
+ TypesConfig /etc/apache2/mime.types
+
+ #
+ # AddType allows you to add to or override the MIME configuration
+ # file specified in TypesConfig for specific file types.
+ #
+ #AddType application/x-gzip .tgz
+ #
+ # AddEncoding allows you to have certain browsers uncompress
+ # information on the fly. Note: Not all browsers support this.
+ #
+ #AddEncoding x-compress .Z
+ #AddEncoding x-gzip .gz .tgz
+ #
+ # If the AddEncoding directives above are commented-out, then you
+ # probably should define those extensions to indicate media types:
+ #
+ AddType application/x-compress .Z
+ AddType application/x-gzip .gz .tgz
+
+ #
+ # AddHandler allows you to map certain file extensions to "handlers":
+ # actions unrelated to filetype. These can be either built into the server
+ # or added with the Action directive (see below)
+ #
+ # To use CGI scripts outside of ScriptAliased directories:
+ # (You will also need to add "ExecCGI" to the "Options" directive.)
+ #
+ AddHandler cgi-script .cgi .sh
+
+ # For type maps (negotiated resources):
+ #AddHandler type-map var
+
+ #
+ # Filters allow you to process content before it is sent to the client.
+ #
+ # To parse .shtml files for server-side includes (SSI):
+ # (You will also need to add "Includes" to the "Options" directive.)
+ #
+ #AddType text/html .shtml
+ #AddOutputFilter INCLUDES .shtml
+</IfModule>
+
+<IfModule status_module>
+#
+# Allow server status reports generated by mod_status,
+# with the URL of http://servername/server-status
+# Change the ".example.com" to match your domain to enable.
+
+<Location /server-status>
+ SetHandler server-status
+ Require ip 127.0.0.1
+</Location>
+
+#
+# ExtendedStatus controls whether Apache will generate "full" status
+# information (ExtendedStatus On) or just basic information (ExtendedStatus
+# Off) when the "server-status" handler is called. The default is Off.
+#
+#ExtendedStatus On
+</IfModule>
+
+<IfModule info_module>
+#
+# Allow remote server configuration reports, with the URL of
+# http://servername/server-info (requires that mod_info.c be loaded).
+# Change the ".example.com" to match your domain to enable.
+#
+<Location /server-info>
+ SetHandler server-info
+ Require ip 127.0.0.1
+</Location>
+</IfModule>
+
+#
+# Customizable error responses come in three flavors:
+# 1) plain text 2) local redirects 3) external redirects
+#
+# Some examples:
+#ErrorDocument 500 "The server made a boo boo."
+#ErrorDocument 404 /missing.html
+#ErrorDocument 404 "/cgi-bin/missing_handler.pl"
+#ErrorDocument 402 http://www.example.com/subscription_info.html
+#
+
+#
+# MaxRanges: Maximum number of Ranges in a request before
+# returning the entire resource, or one of the special
+# values 'default', 'none' or 'unlimited'.
+# Default setting is to accept 200 Ranges.
+#MaxRanges unlimited
+
+#
+# EnableMMAP and EnableSendfile: On systems that support it,
+# memory-mapping or the sendfile syscall may be used to deliver
+# files. This usually improves server performance, but must
+# be turned off when serving from networked-mounted
+# filesystems or if support for these functions is otherwise
+# broken on your system.
+# Defaults: EnableMMAP On, EnableSendfile Off
+#
+#EnableMMAP off
+#EnableSendfile on
+
+# Load config files from the config directory "/etc/apache2/conf.d".
+#
+IncludeOptional /etc/apache2/conf.d/*.conf
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/config/logrotation b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/logrotation
new file mode 100644
index 0000000..282b1e8
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/config/logrotation
@@ -0,0 +1,12 @@
+/var/log/apache2/*log {
+ daily
+ dateext
+ compress
+ delaycompress
+ missingok
+ notifempty
+ sharedscripts
+ postrotate
+ /etc/init.d/apache2 --quiet --ifstarted reload > /dev/null 2>/dev/null || true
+ endscript
+}
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/tools/project_admin.sh b/charts/k8s-gerrit/container-images/apache-git-http-backend/tools/project_admin.sh
new file mode 100755
index 0000000..7b46ac9
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/tools/project_admin.sh
@@ -0,0 +1,66 @@
+#!/bin/ash
+
+delete() {
+ rm -rf /var/gerrit/git/${REPO}.git
+
+ if ! test -d /var/gerrit/git/${REPO}.git; then
+ STATUS_CODE="204 No Content"
+ MESSAGE="Repository ${REPO} deleted."
+ else
+ MESSAGE="Repository ${REPO} could not be deleted."
+ fi
+}
+
+new() {
+ if test -d /var/gerrit/git/${REPO}.git; then
+ STATUS_CODE="200 OK"
+ MESSAGE="Repository already available."
+ else
+ git init --bare /var/gerrit/git/${REPO}.git > /dev/null
+ if test -f /var/gerrit/git/${REPO}.git/HEAD; then
+ STATUS_CODE="201 Created"
+ MESSAGE="Repository ${REPO} created."
+ else
+ MESSAGE="Repository ${REPO} could not be created."
+ fi
+ fi
+}
+
+update_head(){
+ read -n ${CONTENT_LENGTH} POST_STRING
+ NEW_HEAD=$(echo ${POST_STRING} | jq .ref - | tr -d '"')
+
+ git --git-dir /var/gerrit/git/${REPO}.git symbolic-ref HEAD ${NEW_HEAD}
+
+ if test "ref: ${NEW_HEAD}" == "$(cat /var/gerrit/git/${REPO}.git/HEAD)"; then
+ STATUS_CODE="200 OK"
+ MESSAGE="Repository HEAD updated to ${NEW_HEAD}."
+ else
+ MESSAGE="Repository HEAD could not be updated to ${NEW_HEAD}."
+ fi
+}
+
+echo "Content-type: text/html"
+REPO=${REQUEST_URI##/a/projects/}
+REPO="${REPO//%2F//}"
+REPO="${REPO%%.git}"
+
+if test "${REQUEST_METHOD}" == "PUT"; then
+ if [[ ${REQUEST_URI} == */HEAD ]]; then
+ REPO=${REPO%"/HEAD"}
+ update_head
+ else
+ new
+ fi
+elif test "${REQUEST_METHOD}" == "DELETE"; then
+ delete
+else
+ STATUS_CODE="400 Bad Request"
+ MESSAGE="Unknown method."
+fi
+
+test -z ${STATUS_CODE} && STATUS_CODE="500 Internal Server Error"
+
+echo "Status: ${STATUS_CODE}"
+echo ""
+echo "${MESSAGE}"
diff --git a/charts/k8s-gerrit/container-images/apache-git-http-backend/tools/start b/charts/k8s-gerrit/container-images/apache-git-http-backend/tools/start
new file mode 100755
index 0000000..6b47114
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/apache-git-http-backend/tools/start
@@ -0,0 +1,4 @@
+#!/bin/ash
+
+/usr/sbin/apachectl start \
+ && tail -F -q -n +1 /var/log/apache2/*.log
diff --git a/charts/k8s-gerrit/container-images/base/Dockerfile b/charts/k8s-gerrit/container-images/base/Dockerfile
new file mode 100644
index 0000000..120a1a0
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/base/Dockerfile
@@ -0,0 +1,11 @@
+ARG BASE_IMAGE=alpine:3.18.2
+FROM $BASE_IMAGE
+
+ENV LC_ALL=C.UTF-8
+ENV LANG=C.UTF-8
+
+RUN apk update && \
+ apk add --no-cache git
+
+ARG GERRIT_UID=1000
+RUN adduser -D gerrit -u $GERRIT_UID -G users
diff --git a/charts/k8s-gerrit/container-images/base/README.md b/charts/k8s-gerrit/container-images/base/README.md
new file mode 100644
index 0000000..59533cb
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/base/README.md
@@ -0,0 +1,10 @@
+# Base image
+
+This is the base Docker image for Gerrit deployment on Kubernetes.
+It is only used in the build process and not published on Dockerhub.
+
+## Content
+
+* Alpine Linux 3.18.2 (see `BASE_IMAGE` in the Dockerfile)
+* git
+* create `gerrit`-user as a non-root user to run the applications
diff --git a/charts/k8s-gerrit/container-images/gerrit-base/Dockerfile b/charts/k8s-gerrit/container-images/gerrit-base/Dockerfile
new file mode 100644
index 0000000..1f08ac6
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-base/Dockerfile
@@ -0,0 +1,57 @@
+ARG TAG=latest
+FROM base:${TAG}
+
+RUN apk update && \
+ apk add --no-cache \
+ coreutils \
+ curl \
+ openssh-keygen \
+ openjdk11
+
+RUN mkdir -p /var/gerrit/bin && \
+ mkdir -p /var/gerrit/etc && \
+ mkdir -p /var/gerrit/plugins && \
+ mkdir -p /var/plugins && \
+ mkdir -p /var/war
+
+# Download Gerrit release
+# TODO: Revert back to use release versions as soon as change 383334 has been released
+ARG GERRIT_WAR_URL=https://gerrit-ci.gerritforge.com/view/Gerrit/job/Gerrit-bazel-stable-3.8/lastSuccessfulBuild/artifact/gerrit/bazel-bin/release.war
+RUN curl -k -o /var/war/gerrit.war ${GERRIT_WAR_URL} && \
+ ln -s /var/war/gerrit.war /var/gerrit/bin/gerrit.war
+
+# Download healthcheck plugin
+ARG HEALTHCHECK_JAR_URL=https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.8/job/plugin-healthcheck-bazel-stable-3.8/lastSuccessfulBuild/artifact/bazel-bin/plugins/healthcheck/healthcheck.jar
+RUN curl -k -o /var/plugins/healthcheck.jar ${HEALTHCHECK_JAR_URL} && \
+ ln -s /var/plugins/healthcheck.jar /var/gerrit/plugins/healthcheck.jar
+
+# Download global-refdb lib
+ARG GLOBAL_REFDB_URL=https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.8/job/module-global-refdb-bazel-stable-3.8/lastSuccessfulBuild/artifact/bazel-bin/plugins/global-refdb/global-refdb.jar
+RUN curl -k -o /var/plugins/global-refdb.jar ${GLOBAL_REFDB_URL}
+
+# Download high-availability plugin
+ARG HA_JAR_URL=https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.8/job/plugin-high-availability-bazel-stable-3.8/lastSuccessfulBuild/artifact/bazel-bin/plugins/high-availability/high-availability.jar
+RUN curl -k -o /var/plugins/high-availability.jar ${HA_JAR_URL}
+
+# Download zookeeper-refdb plugin
+ARG ZOOKEEPER_REFDB_URL=https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.8/job/plugin-zookeeper-refdb-bazel-stable-3.8/lastSuccessfulBuild/artifact/bazel-bin/plugins/zookeeper-refdb/zookeeper-refdb.jar
+RUN curl -k -o /var/plugins/zookeeper-refdb.jar ${ZOOKEEPER_REFDB_URL}
+
+# Download spanner-refdb plugin
+ARG SPANNER_REFDB_URL=https://gerrit-ci.gerritforge.com/view/Plugins-stable-3.8/job/plugin-spanner-refdb-bazel-master-stable-3.8/lastSuccessfulBuild/artifact/bazel-bin/plugins/spanner-refdb/spanner-refdb.jar
+RUN curl -k -o /var/plugins/spanner-refdb.jar ${SPANNER_REFDB_URL}
+
+# Allow incoming traffic
+EXPOSE 29418 8080
+
+RUN chown -R gerrit:users /var/gerrit && \
+ chown -R gerrit:users /var/plugins && \
+ chown -R gerrit:users /var/war
+USER gerrit
+
+RUN java -jar /var/gerrit/bin/gerrit.war init \
+ --batch \
+ --no-auto-start \
+ -d /var/gerrit
+
+ENTRYPOINT ["ash", "/var/tools/start"]
diff --git a/charts/k8s-gerrit/container-images/gerrit-base/README.md b/charts/k8s-gerrit/container-images/gerrit-base/README.md
new file mode 100644
index 0000000..4d67048
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-base/README.md
@@ -0,0 +1,25 @@
+# Gerrit base image
+
+Gerrit base image for Gerrit and Gerrit replica images.
+It is only used in the build process and not published on Dockerhub.
+
+## Content
+
+* base image
+* curl
+* openssh-keygen
+* OpenJDK 11
+* gerrit.war
+
+## Setup and configuration
+
+* install package dependencies
+* create base folders for gerrit binary and gerrit configuration
+* download gerrit.war from provided URL
+* prepare filesystem permissions for gerrit user
+* open ports for incoming traffic
+* initialize default Gerrit site
+
+## Start
+
+* starts the container via start script `/var/tools/start`
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/.dockerignore b/charts/k8s-gerrit/container-images/gerrit-init/.dockerignore
new file mode 100644
index 0000000..7c68535
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/.dockerignore
@@ -0,0 +1 @@
+tools/__pycache__
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/Dockerfile b/charts/k8s-gerrit/container-images/gerrit-init/Dockerfile
new file mode 100644
index 0000000..70da7aa
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/Dockerfile
@@ -0,0 +1,29 @@
+ARG TAG=latest
+FROM gerrit-base:${TAG}
+
+USER root
+
+COPY dependencies/* /var/tools/
+COPY requirements.txt /var/tools/
+WORKDIR /var/tools
+
+RUN apk update && \
+ apk add --no-cache \
+ python3 && \
+ python3 -m ensurepip && \
+ rm -r /usr/lib/python*/ensurepip && \
+ # follow https://til.simonwillison.net/python/pip-tools to update hashes
+ pip3 install --require-hashes -r requirements.txt --no-cache --upgrade && \
+ pipenv install --python 3.11 --system
+
+COPY tools /var/tools/
+COPY config/* /var/config/
+
+RUN mkdir -p /var/mnt/git \
+ && mkdir -p /var/mnt/logs \
+ && chown -R gerrit:users /var/mnt
+
+USER gerrit
+
+ENTRYPOINT ["python3", "/var/tools/gerrit-initializer"]
+CMD ["-s", "/var/gerrit", "-c", "/var/config/gerrit-init.yaml", "init"]
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/README.md b/charts/k8s-gerrit/container-images/gerrit-init/README.md
new file mode 100644
index 0000000..37b5bda
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/README.md
@@ -0,0 +1,64 @@
+# Gerrit replica init container image
+
+Kubernetes init container for initializing gerrit. The python script running in
+the container initializes Gerrit including the installation of configured
+plugins.
+
+## Content
+
+* gerrit-base image
+
+## Setup and configuration
+
+* install python 3
+* copy tool scripts
+
+## Start
+
+* start the container via start script `python3 /var/tools/gerrit-initializer init`
+
+The `main.py init`-command
+
+* reads configuration from gerrit.config (via `gerrit_config_parser.py`)
+* initializes Gerrit
+
+The `main.py validate_notedb`-command
+
+* validates and waits for the repository `All-Projects.git` with the refs
+`refs/meta/config`.
+* validates and waits for the repository `All-Users.git` with the ref
+`refs/meta/config`.
+
+## Configuration
+
+The configuration format looks as follows:
+
+```yaml
+plugins: []
+# A plugin packaged in the gerrit.war-file
+# - name: download-commands
+
+# A plugin packaged in the gerrit.war-file that will also be installed as a
+# lib
+# - name: replication
+# installAsLibrary: true
+
+# A plugin that will be downloaded on startup
+# - name: delete-project
+# url: https://example.com/gerrit-plugins/delete-project.jar
+# sha1:
+# installAsLibrary: false
+libs: []
+# A lib that will be downloaded on startup
+# - name: global-refdb
+# url: https://example.com/gerrit-plugins/global-refdb.jar
+# sha1:
+#DEPRECATED: `pluginCache` was deprecated in favor of `pluginCacheEnabled`
+# pluginCache: true
+pluginCacheEnabled: false
+pluginCacheDir: null
+# Can be either true to use default CA certificates, false to disable SSL
+# verification or a path to a custom CA certificate store.
+caCertPath: true
+highAvailability: false
+```
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/config/gerrit-init.yaml b/charts/k8s-gerrit/container-images/gerrit-init/config/gerrit-init.yaml
new file mode 100644
index 0000000..65b7b28
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/config/gerrit-init.yaml
@@ -0,0 +1 @@
+pluginCacheEnabled: false
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/dependencies/Pipfile b/charts/k8s-gerrit/container-images/gerrit-init/dependencies/Pipfile
new file mode 100644
index 0000000..6a55c64
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/dependencies/Pipfile
@@ -0,0 +1,13 @@
+[[source]]
+name = "pypi"
+url = "https://pypi.org/simple"
+verify_ssl = true
+
+[dev-packages]
+
+[packages]
+pyyaml = "~=6.0"
+requests = "~=2.31.0"
+
+[requires]
+python_version = "3.11"
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/dependencies/Pipfile.lock b/charts/k8s-gerrit/container-images/gerrit-init/dependencies/Pipfile.lock
new file mode 100644
index 0000000..10e814e
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/dependencies/Pipfile.lock
@@ -0,0 +1,180 @@
+{
+ "_meta": {
+ "hash": {
+ "sha256": "bf7e62c1c2c8f726ef7dab0c66bddf079c2f4cee97a1d5a4d4546fcc4f41600f"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.11"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "certifi": {
+ "hashes": [
+ "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7",
+ "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2023.5.7"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96",
+ "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c",
+ "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710",
+ "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706",
+ "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020",
+ "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252",
+ "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad",
+ "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329",
+ "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a",
+ "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f",
+ "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6",
+ "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4",
+ "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a",
+ "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46",
+ "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2",
+ "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23",
+ "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace",
+ "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd",
+ "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982",
+ "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10",
+ "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2",
+ "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea",
+ "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09",
+ "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5",
+ "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149",
+ "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489",
+ "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9",
+ "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80",
+ "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592",
+ "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3",
+ "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6",
+ "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed",
+ "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c",
+ "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200",
+ "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a",
+ "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e",
+ "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d",
+ "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6",
+ "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623",
+ "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669",
+ "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3",
+ "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa",
+ "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9",
+ "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2",
+ "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f",
+ "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1",
+ "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4",
+ "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a",
+ "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8",
+ "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3",
+ "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029",
+ "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f",
+ "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959",
+ "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22",
+ "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7",
+ "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952",
+ "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346",
+ "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e",
+ "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d",
+ "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299",
+ "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd",
+ "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a",
+ "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3",
+ "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037",
+ "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94",
+ "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c",
+ "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858",
+ "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a",
+ "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449",
+ "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c",
+ "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918",
+ "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1",
+ "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c",
+ "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac",
+ "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.2.0"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
+ "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.4"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc",
+ "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741",
+ "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206",
+ "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27",
+ "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595",
+ "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62",
+ "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98",
+ "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696",
+ "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d",
+ "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867",
+ "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47",
+ "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486",
+ "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6",
+ "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3",
+ "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007",
+ "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
+ "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c",
+ "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735",
+ "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
+ "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
+ "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
+ "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
+ "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
+ "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
+ "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0",
+ "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
+ "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c",
+ "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c",
+ "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924",
+ "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34",
+ "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
+ "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859",
+ "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673",
+ "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
+ "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab",
+ "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa",
+ "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c",
+ "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585",
+ "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d",
+ "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"
+ ],
+ "index": "pypi",
+ "version": "==6.0.1"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
+ ],
+ "index": "pypi",
+ "version": "==2.31.0"
+ },
+ "urllib3": {
+ "hashes": [
+ "sha256:48e7fafa40319d358848e1bc6809b208340fafe2096f1725d05d67443d0483d1",
+ "sha256:bee28b5e56addb8226c96f7f13ac28cb4c301dd5ea8a6ca179c0b9835e032825"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.0.3"
+ }
+ },
+ "develop": {}
+}
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/requirements.in b/charts/k8s-gerrit/container-images/gerrit-init/requirements.in
new file mode 100644
index 0000000..a7f62a1
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/requirements.in
@@ -0,0 +1,3 @@
+setuptools
+wheel
+pipenv
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/requirements.txt b/charts/k8s-gerrit/container-images/gerrit-init/requirements.txt
new file mode 100644
index 0000000..c2de41b
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/requirements.txt
@@ -0,0 +1,46 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --allow-unsafe --generate-hashes requirements.in
+#
+certifi==2022.12.7 \
+ --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
+ --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
+ # via pipenv
+distlib==0.3.6 \
+ --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \
+ --hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e
+ # via virtualenv
+filelock==3.9.0 \
+ --hash=sha256:7b319f24340b51f55a2bf7a12ac0755a9b03e718311dac567a0f4f7fabd2f5de \
+ --hash=sha256:f58d535af89bb9ad5cd4df046f741f8553a418c01a7856bf0d173bbc9f6bd16d
+ # via virtualenv
+pipenv==2023.2.18 \
+ --hash=sha256:4e45226d197ad84fa11a9d944cb0e1bfcc197919944d0af96e55adf7e1fdc76c \
+ --hash=sha256:ecbe4e301616c5fa3128d557507d79a35d895cd922139969929d7357b66b1509
+ # via -r requirements.in
+platformdirs==3.0.0 \
+ --hash=sha256:8a1228abb1ef82d788f74139988b137e78692984ec7b08eaa6c65f1723af28f9 \
+ --hash=sha256:b1d5eb14f221506f50d6604a561f4c5786d9e80355219694a1b244bcd96f4567
+ # via virtualenv
+virtualenv==20.19.0 \
+ --hash=sha256:37a640ba82ed40b226599c522d411e4be5edb339a0c0de030c0dc7b646d61590 \
+ --hash=sha256:54eb59e7352b573aa04d53f80fc9736ed0ad5143af445a1e539aada6eb947dd1
+ # via pipenv
+virtualenv-clone==0.5.7 \
+ --hash=sha256:418ee935c36152f8f153c79824bb93eaf6f0f7984bae31d3f48f350b9183501a \
+ --hash=sha256:44d5263bceed0bac3e1424d64f798095233b64def1c5689afa43dc3223caf5b0
+ # via pipenv
+wheel==0.38.4 \
+ --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \
+ --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8
+ # via -r requirements.in
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==67.4.0 \
+ --hash=sha256:e5fd0a713141a4a105412233c63dc4e17ba0090c8e8334594ac790ec97792330 \
+ --hash=sha256:f106dee1b506dee5102cc3f3e9e68137bbad6d47b616be7991714b0c62204251
+ # via
+ # -r requirements.in
+ # pipenv
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/__main__.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/__main__.py
new file mode 100644
index 0000000..e49cc31
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/__main__.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from main import main
+
+if __name__ == "__main__":
+ main()
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/__init__.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/__init__.py
new file mode 100644
index 0000000..2230656
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/config/__init__.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/config/__init__.py
new file mode 100644
index 0000000..2230656
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/config/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/config/init_config.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/config/init_config.py
new file mode 100644
index 0000000..68a6f00
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/config/init_config.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+import yaml
+
+
+class InitConfig:
+ def __init__(self):
+ self.plugins = []
+ self.libs = []
+ self.plugin_cache_enabled = False
+ self.plugin_cache_dir = None
+
+ self.ca_cert_path = True
+
+ self.is_ha = False
+ self.refdb = False
+
+ def parse(self, config_file):
+ if not os.path.exists(config_file):
+ raise FileNotFoundError(f"Could not find config file: {config_file}")
+
+ with open(config_file, "r", encoding="utf-8") as f:
+ config = yaml.load(f, Loader=yaml.SafeLoader)
+
+ if config is None:
+ raise ValueError(f"Invalid config-file: {config_file}")
+
+ if "plugins" in config:
+ self.plugins = config["plugins"]
+ if "libs" in config:
+ self.libs = config["libs"]
+ # DEPRECATED: `pluginCache` was deprecated in favor of `pluginCacheEnabled`
+ if "pluginCache" in config:
+ self.plugin_cache_enabled = config["pluginCache"]
+ if "pluginCacheEnabled" in config:
+ self.plugin_cache_enabled = config["pluginCacheEnabled"]
+ if "pluginCacheDir" in config and config["pluginCacheDir"]:
+ self.plugin_cache_dir = config["pluginCacheDir"]
+
+ if "caCertPath" in config:
+ self.ca_cert_path = config["caCertPath"]
+
+ self.is_ha = "highAvailability" in config and config["highAvailability"]
+ if "refdb" in config:
+ self.refdb = config["refdb"]
+
+ return self
+
+ def get_plugins(self):
+ return self.plugins
+
+ def get_plugin_names(self):
+ return set([p["name"] for p in self.plugins])
+
+ def get_libs(self):
+ return self.libs
+
+ def get_lib_names(self):
+ return set([p["name"] for p in self.libs])
+
+ def get_packaged_plugins(self):
+ return list(filter(lambda x: "url" not in x, self.plugins))
+
+ def get_downloaded_plugins(self):
+ return list(filter(lambda x: "url" in x, self.plugins))
+
+ def get_plugins_installed_as_lib(self):
+ return [
+ lib["name"]
+ for lib in list(
+ filter(
+ lambda x: "installAsLibrary" in x and x["installAsLibrary"],
+ self.plugins,
+ )
+ )
+ ]
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/__init__.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/__init__.py
new file mode 100644
index 0000000..2230656
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/git.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/git.py
new file mode 100644
index 0000000..f21b28d
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/git.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+
+
class GitConfigParser:
    """Reads values from a git-style config file by shelling out to ``git config``."""

    def __init__(self, config_path):
        # Path handed to ``git config -f``.
        self.path = config_path

    def _run_git_config(self, command):
        """Run ``command`` and return its stdout as a list of stripped lines.

        Raises subprocess.CalledProcessError on a non-zero exit status.
        """
        result = subprocess.run(
            command.split(), stdout=subprocess.PIPE, check=True, universal_newlines=True
        )
        return [line.strip() for line in result.stdout.splitlines()]

    def _get_value(self, key):
        return self._run_git_config(f"git config -f {self.path} --get {key}")

    def list(self):
        """Return every option as a dict with section/subsection/key/value.

        ``subsection`` is None for two-part keys (``section.key``).
        """
        option_list = []
        for line in self._run_git_config(f"git config -f {self.path} --list"):
            full_key, value = line.split("=", 1)
            parts = full_key.split(".")
            parsed_opt = {"value": value, "section": parts[0]}
            if len(parts) == 2:
                parsed_opt["subsection"] = None
                parsed_opt["key"] = parts[1]
            elif len(parts) == 3:
                parsed_opt["subsection"] = parts[1]
                parsed_opt["key"] = parts[2]
            option_list.append(parsed_opt)
        return option_list

    def get(self, key, default=None):
        """
        Returns value of given key in the configuration file. If the key appears
        multiple times, the last value is returned.
        """
        try:
            return self._get_value(key)[-1]
        except subprocess.CalledProcessError:
            return default

    def get_boolean(self, key, default=False):
        """
        Returns boolean value of given key in the configuration file. If the key
        appears multiple times, the last value is returned.
        """
        if not isinstance(default, bool):
            raise TypeError("Default has to be a boolean.")

        try:
            value = self._get_value(key)[-1].lower()
            if value not in ["true", "false"]:
                raise TypeError("Value is not a boolean.")
            return value == "true"
        except subprocess.CalledProcessError:
            return default
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/log.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/log.py
new file mode 100644
index 0000000..06aa72c
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/helpers/log.py
@@ -0,0 +1,26 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+
def get_logger(name):
    """Return a DEBUG-level logger that writes to stderr.

    Uses ``logging.getLogger`` so repeated calls with the same name share a
    single logger instance. The original implementation instantiated
    ``logging.Logger`` directly, which bypasses the logging manager and
    attached a fresh StreamHandler on every call; the guard below prevents
    duplicate handlers.
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    # Directly-instantiated Logger objects had no parent and therefore never
    # propagated records; keep that behavior to avoid duplicate output via the
    # root logger.
    log.propagate = False
    if not log.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("[%(asctime)s] %(levelname)s %(message)s")
        )
        log.addHandler(handler)
    return log
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/__init__.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/__init__.py
new file mode 100644
index 0000000..2230656
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/download_plugins.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/download_plugins.py
new file mode 100755
index 0000000..2c9ace0
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/download_plugins.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import os
+import shutil
+import time
+
+from abc import ABC, abstractmethod
+from zipfile import ZipFile
+
+import requests
+
+from ..helpers import log
+
+LOG = log.get_logger("init")
+# Seconds after which another pod's download lock file is considered stale
+# and may be removed.
+MAX_LOCK_LIFETIME = 60
+# Maximum number of cached jar versions kept per plugin in the shared cache.
+MAX_CACHED_VERSIONS = 5
+
+# Plugins/libs every deployment needs; HA deployments additionally require
+# the high-availability plugin and its lib modules.
+REQUIRED_PLUGINS = ["healthcheck"]
+REQUIRED_HA_PLUGINS = ["high-availability"]
+REQUIRED_HA_LIBS = ["high-availability", "global-refdb"]
+
+
class InvalidPluginException(Exception):
    """Raised when a downloaded plugin fails validation (e.g. SHA1 mismatch)."""
+
+
class MissingRequiredPluginException(Exception):
    """Raised when a required plugin or lib cannot be found anywhere.

    The previous docstring was copy-pasted from InvalidPluginException and
    described the wrong condition.
    """
+
+
+class AbstractPluginInstaller(ABC):
+ # Manages the plugin and lib jars of a Gerrit site: removes unwanted jars,
+ # installs required jars (from gerrit.war or the container image) and
+ # symlinks plugins that have to be loaded as lib modules. How downloaded
+ # plugins are fetched is left to subclasses via _install_jar().
+ def __init__(self, site, config):
+ self.site = site
+ self.config = config
+
+ self.required_plugins = self._get_required_plugins()
+ self.required_libs = self._get_required_libs()
+
+ self.plugin_dir = os.path.join(site, "plugins")
+ self.lib_dir = os.path.join(site, "lib")
+ # Flipped to True whenever a jar was added or replaced; consumers use it
+ # to decide whether Gerrit has to be re-initialized.
+ self.plugins_changed = False
+
+ def _create_plugins_dir(self):
+ if not os.path.exists(self.plugin_dir):
+ os.makedirs(self.plugin_dir)
+ LOG.info("Created plugin installation directory: %s", self.plugin_dir)
+
+ def _create_lib_dir(self):
+ if not os.path.exists(self.lib_dir):
+ os.makedirs(self.lib_dir)
+ LOG.info("Created lib installation directory: %s", self.lib_dir)
+
+ def _get_installed_plugins(self):
+ return self._get_installed_jars(self.plugin_dir)
+
+ def _get_installed_libs(self):
+ return self._get_installed_jars(self.lib_dir)
+
+ @staticmethod
+ # NOTE(review): the parameter name shadows the builtin dir().
+ def _get_installed_jars(dir):
+ if os.path.exists(dir):
+ return [f for f in os.listdir(dir) if f.endswith(".jar")]
+
+ return []
+
+ def _get_required_plugins(self):
+ # Plugins that must be present regardless of the user's configuration.
+ required = REQUIRED_PLUGINS.copy()
+ if self.config.is_ha:
+ required.extend(REQUIRED_HA_PLUGINS)
+ if self.config.refdb:
+ required.append(f"{self.config.refdb}-refdb")
+ LOG.info("Requiring plugins: %s", required)
+ return required
+
+ def _get_required_libs(self):
+ required = []
+ if self.config.is_ha:
+ required.extend(REQUIRED_HA_LIBS)
+ LOG.info("Requiring libs: %s", required)
+ return required
+
+ def _install_required_plugins(self):
+ for plugin in self.required_plugins:
+ # Skip plugins the user configured explicitly; those are handled by the
+ # regular install paths.
+ if plugin in self.config.get_plugin_names():
+ continue
+
+ self._install_required_jar(plugin, self.plugin_dir)
+
+ def _install_required_libs(self):
+ for lib in self.required_libs:
+ if lib in self.config.get_lib_names():
+ continue
+
+ self._install_required_jar(lib, self.lib_dir)
+
+ def _install_required_jar(self, jar, target_dir):
+ # Prefer the jar packaged inside gerrit.war; fall back to a jar shipped
+ # in the container image under /var/plugins.
+ with ZipFile("/var/war/gerrit.war", "r") as war:
+ # Lib modules can be packaged as a plugin. However, they could
+ # currently not be installed by the init pgm tool.
+ if f"WEB-INF/plugins/{jar}.jar" in war.namelist():
+ self._install_plugin_from_war(jar, target_dir)
+ return
+ try:
+ self._install_jar_from_container(jar, target_dir)
+ except FileNotFoundError:
+ raise MissingRequiredPluginException(f"Required jar {jar} was not found.")
+
+ def _install_jar_from_container(self, plugin, target_dir):
+ source_file = os.path.join("/var/plugins", plugin + ".jar")
+ target_file = os.path.join(target_dir, plugin + ".jar")
+ LOG.info(
+ "Installing plugin %s from container to %s.",
+ plugin,
+ target_file,
+ )
+ if not os.path.exists(source_file):
+ raise FileNotFoundError(
+ "Unable to find required plugin in container: " + plugin
+ )
+ # Copy only when the target is missing or its SHA1 differs.
+ if os.path.exists(target_file) and self._get_file_sha(
+ source_file
+ ) == self._get_file_sha(target_file):
+ return
+
+ shutil.copyfile(source_file, target_file)
+ self.plugins_changed = True
+
+ def _install_plugins_from_war(self):
+ for plugin in self.config.get_packaged_plugins():
+ self._install_plugin_from_war(plugin["name"], self.plugin_dir)
+
+ def _install_plugin_from_war(self, plugin, target_dir):
+ LOG.info("Installing packaged plugin %s.", plugin)
+ # The jar is first extracted into plugin_dir/WEB-INF/plugins/ and moved
+ # to its final location; the temporary WEB-INF tree is removed below.
+ with ZipFile("/var/war/gerrit.war", "r") as war:
+ war.extract(f"WEB-INF/plugins/{plugin}.jar", self.plugin_dir)
+
+ source_file = f"{self.plugin_dir}/WEB-INF/plugins/{plugin}.jar"
+ target_file = os.path.join(target_dir, f"{plugin}.jar")
+ if not os.path.exists(target_file) or self._get_file_sha(
+ source_file
+ ) != self._get_file_sha(target_file):
+ os.rename(source_file, target_file)
+ self.plugins_changed = True
+
+ shutil.rmtree(os.path.join(self.plugin_dir, "WEB-INF"), ignore_errors=True)
+
+ @staticmethod
+ # Stream the file in fixed-size chunks to keep memory usage constant.
+ def _get_file_sha(file):
+ file_hash = hashlib.sha1()
+ with open(file, "rb") as f:
+ while True:
+ chunk = f.read(64000)
+ if not chunk:
+ break
+ file_hash.update(chunk)
+
+ LOG.debug("SHA1 of file '%s' is %s", file, file_hash.hexdigest())
+
+ return file_hash.hexdigest()
+
+ def _remove_unwanted_plugins(self):
+ # NOTE(review): wanted_plugins mixes plugin dicts (from get_plugins())
+ # with plain name strings (required_plugins); the stem comparison in
+ # _remove_unwanted() can only match the strings — confirm that configured
+ # plugins are not deleted and re-downloaded on every run.
+ wanted_plugins = list(self.config.get_plugins())
+ wanted_plugins.extend(self.required_plugins)
+ self._remove_unwanted(
+ wanted_plugins, self._get_installed_plugins(), self.plugin_dir
+ )
+
+ def _remove_unwanted_libs(self):
+ wanted_libs = list(self.config.get_libs())
+ wanted_libs.extend(self.required_libs)
+ wanted_libs.extend(self.config.get_plugins_installed_as_lib())
+ self._remove_unwanted(wanted_libs, self._get_installed_libs(), self.lib_dir)
+
+ @staticmethod
+ # Delete every installed jar whose basename is not in the wanted list.
+ def _remove_unwanted(wanted, installed, dir):
+ for plugin in installed:
+ if os.path.splitext(plugin)[0] not in wanted:
+ os.remove(os.path.join(dir, plugin))
+ LOG.info("Removed plugin %s", plugin)
+
+ def _symlink_plugins_to_lib(self):
+ # Remove stale symlinks, then (re-)link every plugin that has to be
+ # loaded as a lib module into the lib directory.
+ if not os.path.exists(self.lib_dir):
+ os.makedirs(self.lib_dir)
+ else:
+ for f in os.listdir(self.lib_dir):
+ path = os.path.join(self.lib_dir, f)
+ if (
+ os.path.islink(path)
+ and os.path.splitext(f)[0]
+ not in self.config.get_plugins_installed_as_lib()
+ ):
+ os.unlink(path)
+ LOG.info("Removed symlink %s", f)
+ for lib in self.config.get_plugins_installed_as_lib():
+ plugin_path = os.path.join(self.plugin_dir, f"{lib}.jar")
+ if os.path.exists(plugin_path):
+ try:
+ os.symlink(plugin_path, os.path.join(self.lib_dir, f"{lib}.jar"))
+ except FileExistsError:
+ continue
+ else:
+ raise FileNotFoundError(
+ f"Could not find plugin {lib} to symlink to lib-directory."
+ )
+
+ # Entry point: bring the site's plugin/lib directories into the desired
+ # state (cleanup first, then required, packaged and downloaded jars).
+ def execute(self):
+ self._create_plugins_dir()
+ self._create_lib_dir()
+
+ self._remove_unwanted_plugins()
+ self._remove_unwanted_libs()
+
+ self._install_required_plugins()
+ self._install_required_libs()
+
+ self._install_plugins_from_war()
+
+ for plugin in self.config.get_downloaded_plugins():
+ self._install_plugin(plugin)
+
+ for plugin in self.config.get_libs():
+ self._install_lib(plugin)
+
+ self._symlink_plugins_to_lib()
+
+ def _download_plugin(self, plugin, target):
+ # NOTE(review): requests.get() is called without a timeout and the HTTP
+ # status is not checked — an unreachable server can hang the init job and
+ # an error page is only caught indirectly by the SHA1 check below.
+ LOG.info("Downloading %s plugin to %s", plugin["name"], target)
+ try:
+ response = requests.get(plugin["url"])
+ except requests.exceptions.SSLError:
+ # Retry with the configured CA bundle for self-signed endpoints.
+ response = requests.get(plugin["url"], verify=self.config.ca_cert_path)
+
+ with open(target, "wb") as f:
+ f.write(response.content)
+
+ file_sha = self._get_file_sha(target)
+
+ if file_sha != plugin["sha1"]:
+ os.remove(target)
+ raise InvalidPluginException(
+ (
+ f"SHA1 of downloaded file ({file_sha}) did not match "
+ f"expected SHA1 ({plugin['sha1']}). "
+ f"Removed downloaded file ({target})"
+ )
+ )
+
+ def _install_plugin(self, plugin):
+ self._install_jar(plugin, self.plugin_dir)
+
+ def _install_lib(self, lib):
+ self._install_jar(lib, self.lib_dir)
+
+ @abstractmethod
+ # Install a single jar described by a config dict into target_dir.
+ def _install_jar(self, plugin, target_dir):
+ pass
+
+
class PluginInstaller(AbstractPluginInstaller):
    """Installer that downloads plugins straight into the site (no cache)."""

    def _install_jar(self, plugin, target_dir):
        destination = os.path.join(target_dir, f"{plugin['name']}.jar")
        # Nothing to do when the installed jar already matches the expected SHA1.
        already_current = (
            os.path.exists(destination)
            and self._get_file_sha(destination) == plugin["sha1"]
        )
        if already_current:
            return

        self._download_plugin(plugin, destination)

        self.plugins_changed = True
+
+
class CachedPluginInstaller(AbstractPluginInstaller):
    """Installer that caches downloaded plugin jars on a shared volume.

    Multiple pods may share the cache; a lock file next to the cached jar
    serializes concurrent downloads of the same plugin version.
    """

    @staticmethod
    def _cleanup_cache(plugin_cache_dir):
        """Trim the per-plugin cache to MAX_CACHED_VERSIONS files, oldest first."""
        cached_files = [
            os.path.join(plugin_cache_dir, f) for f in os.listdir(plugin_cache_dir)
        ]
        while len(cached_files) > MAX_CACHED_VERSIONS:
            oldest_file = min(cached_files, key=os.path.getctime)
            LOG.info(
                "Too many cached files in %s. Removing file %s",
                plugin_cache_dir,
                oldest_file,
            )
            os.remove(oldest_file)
            cached_files.remove(oldest_file)

    @staticmethod
    def _create_download_lock(lock_path):
        """Create a lock file containing this pod's hostname."""
        with open(lock_path, "w", encoding="utf-8") as f:
            # Fall back to a placeholder so a missing HOSTNAME env var cannot
            # abort the install with a KeyError.
            f.write(os.environ.get("HOSTNAME", "unknown"))
        LOG.debug("Created download lock %s", lock_path)

    @staticmethod
    def _create_plugin_cache_dir(plugin_cache_dir):
        if not os.path.exists(plugin_cache_dir):
            os.makedirs(plugin_cache_dir)
            LOG.info("Created cache directory %s", plugin_cache_dir)

    def _get_cached_plugin_path(self, plugin):
        """Cache location of a plugin version: <cache>/<name>/<name>-<sha1>.jar."""
        return os.path.join(
            self.config.plugin_cache_dir,
            plugin["name"],
            f"{plugin['name']}-{plugin['sha1']}.jar",
        )

    def _install_from_cache_or_download(self, plugin, target):
        """Copy the plugin from the cache, downloading it first if necessary."""
        cached_plugin_path = self._get_cached_plugin_path(plugin)

        if os.path.exists(cached_plugin_path):
            LOG.info("Installing %s plugin from cache.", plugin["name"])
        else:
            LOG.info("%s not found in cache. Downloading it.", plugin["name"])
            self._create_plugin_cache_dir(os.path.dirname(cached_plugin_path))

            lock_path = f"{cached_plugin_path}.lock"
            while os.path.exists(lock_path):
                LOG.info(
                    "Download lock found (%s). Waiting %d seconds for it to be released.",
                    lock_path,
                    MAX_LOCK_LIFETIME,
                )
                try:
                    lock_timestamp = os.path.getmtime(lock_path)
                except FileNotFoundError:
                    # Lock released between the exists() check and getmtime().
                    continue
                if time.time() > lock_timestamp + MAX_LOCK_LIFETIME:
                    LOG.info("Stale download lock found (%s).", lock_path)
                    self._remove_download_lock(lock_path)
                else:
                    # The previous implementation spun without pausing; sleep
                    # to avoid busy-waiting while another pod downloads.
                    time.sleep(1)

            self._create_download_lock(lock_path)

            try:
                self._download_plugin(plugin, cached_plugin_path)
            finally:
                self._remove_download_lock(lock_path)

        shutil.copy(cached_plugin_path, target)
        self._cleanup_cache(os.path.dirname(cached_plugin_path))

    def _install_jar(self, plugin, target_dir):
        install_path = os.path.join(target_dir, f"{plugin['name']}.jar")
        # Skip the copy/download when the installed jar already matches.
        if (
            os.path.exists(install_path)
            and self._get_file_sha(install_path) == plugin["sha1"]
        ):
            return

        self.plugins_changed = True
        self._install_from_cache_or_download(plugin, install_path)

    @staticmethod
    def _remove_download_lock(lock_path):
        os.remove(lock_path)
        LOG.debug("Removed download lock %s", lock_path)
+
+
def get_installer(site, config):
    """Return the plugin installer matching the configuration.

    Uses the caching installer when a plugin cache is enabled, the plain
    downloading installer otherwise.
    """
    if config.plugin_cache_enabled:
        installer_cls = CachedPluginInstaller
    else:
        installer_cls = PluginInstaller
    return installer_cls(site, config)
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/init.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/init.py
new file mode 100755
index 0000000..4931984
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/init.py
@@ -0,0 +1,227 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import subprocess
+import sys
+
+from ..helpers import git, log
+from .download_plugins import get_installer
+from .reindex import IndexType, get_reindexer
+from .validate_notedb import NoteDbValidator
+
+LOG = log.get_logger("init")
+# Mount point under which config, git repositories, logs and shared data are
+# provided to the container.
+MNT_PATH = "/var/mnt"
+
+
+class GerritInit:
+ # Orchestrates initialization of a Gerrit site: installs plugins, symlinks
+ # mounted site components and configuration, runs `gerrit.war init` when
+ # needed and finally triggers reindexing.
+ def __init__(self, site, config):
+ self.site = site
+ self.config = config
+
+ self.plugin_installer = get_installer(self.site, self.config)
+
+ self.gerrit_config = git.GitConfigParser(
+ os.path.join(MNT_PATH, "etc/config/gerrit.config")
+ )
+ self.is_online_reindex = self.gerrit_config.get_boolean(
+ "index.onlineUpgrade", True
+ )
+ # Set by _gerrit_war_updated() when a minor-version change requires an
+ # offline reindex.
+ self.force_offline_reindex = False
+ self.installed_plugins = self._get_installed_plugins()
+
+ self.is_replica = self.gerrit_config.get_boolean("container.replica")
+ self.pid_file = f"{self.site}/logs/gerrit.pid"
+
+ # Run `java -jar <war> version` and return its output (a version string).
+ def _get_gerrit_version(self, gerrit_war_path):
+ command = f"java -jar {gerrit_war_path} version"
+ version_process = subprocess.run(
+ command.split(), stdout=subprocess.PIPE, check=True
+ )
+ return version_process.stdout.decode().strip()
+
+ # Names (without .jar) of plugin jars already present in the site.
+ def _get_installed_plugins(self):
+ plugin_path = os.path.join(self.site, "plugins")
+ installed_plugins = set()
+
+ if os.path.exists(plugin_path):
+ for f in os.listdir(plugin_path):
+ if os.path.isfile(os.path.join(plugin_path, f)) and f.endswith(".jar"):
+ installed_plugins.add(os.path.splitext(f)[0])
+
+ return installed_plugins
+
+ # Compare installed vs. provided gerrit.war. Side effect: forces an
+ # offline reindex when the minor version changed and online upgrades are
+ # disabled.
+ def _gerrit_war_updated(self):
+ installed_war_path = os.path.join(self.site, "bin", "gerrit.war")
+ installed_version = self._get_gerrit_version(installed_war_path)
+ provided_version = self._get_gerrit_version("/var/war/gerrit.war")
+ # NOTE(review): stray ")" in the log format string below.
+ LOG.info(
+ "Installed Gerrit version: %s; Provided Gerrit version: %s). ",
+ installed_version,
+ provided_version,
+ )
+ installed_minor_version = installed_version.split(".")[0:2]
+ provided_minor_version = provided_version.split(".")[0:2]
+
+ if (
+ not self.is_online_reindex
+ and installed_minor_version != provided_minor_version
+ ):
+ self.force_offline_reindex = True
+ return installed_version != provided_version
+
+ # Whether `gerrit.war init` has to be (re-)run for this site.
+ def _needs_init(self):
+ installed_war_path = os.path.join(self.site, "bin", "gerrit.war")
+ if not os.path.exists(installed_war_path):
+ LOG.info("Gerrit is not yet installed. Initializing new site.")
+ return True
+
+ if self._gerrit_war_updated():
+ LOG.info("Reinitializing site to perform update.")
+ return True
+
+ if self.plugin_installer.plugins_changed:
+ LOG.info("Plugins were installed or updated. Initializing.")
+ return True
+
+ if self.config.get_plugin_names().difference(self.installed_plugins):
+ # NOTE(review): typo "Reininitializing" in the log message.
+ LOG.info("Reininitializing site to install additional plugins.")
+ return True
+
+ LOG.info("No initialization required.")
+ return False
+
+ # Create/refresh a symlink, replacing any pre-existing file or directory
+ # at the target location.
+ def _ensure_symlink(self, src, target):
+ if not os.path.exists(src):
+ raise FileNotFoundError(f"Unable to find mounted dir: {src}")
+
+ if os.path.islink(target) and os.path.realpath(target) == src:
+ return
+
+ if os.path.exists(target):
+ if os.path.isdir(target) and not os.path.islink(target):
+ shutil.rmtree(target)
+ else:
+ os.remove(target)
+
+ os.symlink(src, target)
+
+ # Link mounted volumes (git, logs, shared, index, data) into the site.
+ def _symlink_mounted_site_components(self):
+ self._ensure_symlink(f"{MNT_PATH}/git", f"{self.site}/git")
+ self._ensure_symlink(f"{MNT_PATH}/logs", f"{self.site}/logs")
+
+ mounted_shared_dir = f"{MNT_PATH}/shared"
+ if not self.is_replica and os.path.exists(mounted_shared_dir):
+ self._ensure_symlink(mounted_shared_dir, f"{self.site}/shared")
+
+ index_type = self.gerrit_config.get("index.type", default=IndexType.LUCENE.name)
+ if IndexType[index_type.upper()] is IndexType.ELASTICSEARCH:
+ self._ensure_symlink(f"{MNT_PATH}/index", f"{self.site}/index")
+
+ # Drop dangling symlinks in the data dir before re-linking mounts.
+ data_dir = f"{self.site}/data"
+ if os.path.exists(data_dir):
+ for file_or_dir in os.listdir(data_dir):
+ abs_path = os.path.join(data_dir, file_or_dir)
+ if os.path.islink(abs_path) and not os.path.exists(
+ os.path.realpath(abs_path)
+ ):
+ os.unlink(abs_path)
+ else:
+ os.makedirs(data_dir)
+
+ mounted_data_dir = f"{MNT_PATH}/data"
+ if os.path.exists(mounted_data_dir):
+ for file_or_dir in os.listdir(mounted_data_dir):
+ abs_path = os.path.join(data_dir, file_or_dir)
+ abs_mounted_path = os.path.join(mounted_data_dir, file_or_dir)
+ if os.path.isdir(abs_mounted_path):
+ self._ensure_symlink(abs_mounted_path, abs_path)
+
+ # Link mounted config and secret files into the site's etc directory.
+ def _symlink_configuration(self):
+ etc_dir = f"{self.site}/etc"
+ if not os.path.exists(etc_dir):
+ os.makedirs(etc_dir)
+
+ for config_type in ["config", "secret"]:
+ if os.path.exists(f"{MNT_PATH}/etc/{config_type}"):
+ for file_or_dir in os.listdir(f"{MNT_PATH}/etc/{config_type}"):
+ if os.path.isfile(
+ os.path.join(f"{MNT_PATH}/etc/{config_type}", file_or_dir)
+ ):
+ self._ensure_symlink(
+ os.path.join(f"{MNT_PATH}/etc/{config_type}", file_or_dir),
+ os.path.join(etc_dir, file_or_dir),
+ )
+
+ # Remove host keys auto-generated by `gerrit init` so mounted keys win.
+ def _remove_auto_generated_ssh_keys(self):
+ etc_dir = f"{self.site}/etc"
+ if not os.path.exists(etc_dir):
+ return
+
+ for file_or_dir in os.listdir(etc_dir):
+ full_path = os.path.join(etc_dir, file_or_dir)
+ if os.path.isfile(full_path) and file_or_dir.startswith("ssh_host_"):
+ os.remove(full_path)
+
+ # Entry point: symlink site, install plugins, init when needed, reindex.
+ def execute(self):
+ if not self.is_replica:
+ self._symlink_mounted_site_components()
+ elif not NoteDbValidator(MNT_PATH).check():
+ LOG.info("NoteDB not ready. Initializing repositories.")
+ self._symlink_mounted_site_components()
+ self._symlink_configuration()
+
+ if os.path.exists(self.pid_file):
+ os.remove(self.pid_file)
+
+ self.plugin_installer.execute()
+
+ if self._needs_init():
+ # NOTE(review): a GitConfigParser instance is always truthy, so this
+ # branch is taken unconditionally — the intent appears to be a check
+ # that the mounted gerrit.config file exists; confirm.
+ if self.gerrit_config:
+ LOG.info("Existing gerrit.config found.")
+ dev_option = (
+ "--dev"
+ if self.gerrit_config.get(
+ "auth.type", "development_become_any_account"
+ ).lower()
+ == "development_become_any_account"
+ else ""
+ )
+ else:
+ LOG.info("No gerrit.config found. Initializing default site.")
+ dev_option = "--dev"
+
+ flags = f"--no-auto-start --batch {dev_option}"
+
+ command = f"java -jar /var/war/gerrit.war init {flags} -d {self.site}"
+
+ # NOTE(review): check=True raises CalledProcessError on failure, so
+ # the returncode>0 branch below is dead code; with check=False the
+ # logged sys.exit(1) path would be live — confirm intent.
+ init_process = subprocess.run(
+ command.split(), stdout=subprocess.PIPE, check=True
+ )
+
+ if init_process.returncode > 0:
+ LOG.error(
+ "An error occurred, when initializing Gerrit. Exit code: %d",
+ init_process.returncode,
+ )
+ sys.exit(1)
+
+ self._remove_auto_generated_ssh_keys()
+ self._symlink_configuration()
+
+ if self.is_replica:
+ self._symlink_mounted_site_components()
+
+ get_reindexer(self.site, self.config).start(self.force_offline_reindex)
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/reindex.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/reindex.py
new file mode 100755
index 0000000..e5ec6df
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/reindex.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import enum
+import os.path
+import subprocess
+import sys
+
+import requests
+
+from ..helpers import git, log
+
+LOG = log.get_logger("reindex")
+# Mount point where the managed gerrit.config is provided.
+MNT_PATH = "/var/mnt"
+# Indices that must exist and be ready on a primary vs. a replica site.
+INDEXES_PRIMARY = set(["accounts", "changes", "groups", "projects"])
+INDEXES_REPLICA = set(["groups"])
+
+
class IndexType(enum.Enum):
    """Supported Gerrit secondary-index backends."""

    LUCENE = enum.auto()
    ELASTICSEARCH = enum.auto()
+
+
class GerritAbstractReindexer(abc.ABC):
    """Decides which Gerrit secondary indices need reindexing and runs it.

    Subclasses provide ``_get_indices`` to report the index versions present
    in the concrete backend (Lucene directories or ElasticSearch).
    """

    def __init__(self, gerrit_site_path, config):
        self.gerrit_site_path = gerrit_site_path
        self.index_config_path = f"{self.gerrit_site_path}/index/gerrit_index.config"
        self.init_config = config

        self.gerrit_config = git.GitConfigParser(
            os.path.join(MNT_PATH, "etc/config/gerrit.config")
        )
        self.is_online_reindex = self.gerrit_config.get_boolean(
            "index.onlineUpgrade", True
        )
        self.is_replica = self.gerrit_config.get_boolean("container.replica", False)

        self.configured_indices = self._parse_gerrit_index_config()

    @abc.abstractmethod
    def _get_indices(self):
        """Return {index_name: version} for indices present in the backend."""

    def _parse_gerrit_index_config(self):
        """Parse gerrit_index.config into {name: {"read", "latest_write"}}.

        ``read`` is the newest version marked ready (or None), ``latest_write``
        the newest version present. NOTE(review): versions are compared as
        strings; this assumes Gerrit's zero-padded version numbers ("0011").
        """
        indices = {}
        if not os.path.exists(self.index_config_path):
            return indices
        for opt in git.GitConfigParser(self.index_config_path).list():
            # Subsections look like "changes_0077"; the value is the ready flag.
            name, version = opt["subsection"].rsplit("_", 1)
            ready = opt["value"].lower() == "true"
            if name in indices:
                indices[name] = {
                    "read": version if ready else indices[name]["read"],
                    "latest_write": max(version, indices[name]["latest_write"]),
                }
            else:
                indices[name] = {
                    "read": version if ready else None,
                    "latest_write": version,
                }
        return indices

    def _get_not_ready_indices(self):
        """Indices without a ready version, plus expected-but-missing indices."""
        not_ready_indices = []
        for index, index_attrs in self.configured_indices.items():
            if not index_attrs["read"]:
                LOG.info("Index %s not ready.", index)
                not_ready_indices.append(index)
        index_set = INDEXES_REPLICA if self.is_replica else INDEXES_PRIMARY
        not_ready_indices.extend(index_set.difference(self.configured_indices.keys()))
        return not_ready_indices

    def _indexes_need_update(self):
        """True when any backend index is missing, stale or not yet readable."""
        indices = self._get_indices()

        if not indices:
            return True

        for index, index_attrs in self.configured_indices.items():
            if (
                index not in indices
                or index_attrs["latest_write"] != indices[index]
                or index_attrs["read"] != index_attrs["latest_write"]
            ):
                return True
        return False

    def reindex(self, indices=None):
        """Run an offline reindex; restricted to ``indices`` when given.

        Exits the process with code 1 when the reindex command fails.
        """
        LOG.info("Starting to reindex.")
        command = f"java -jar /var/war/gerrit.war reindex -d {self.gerrit_site_path}"

        if indices:
            command += "".join(f" --index {index}" for index in indices)

        # check=False: failures are logged and converted to exit code 1 below.
        # (With check=True, as before, CalledProcessError was raised first and
        # the error branch was unreachable.)
        reindex_process = subprocess.run(
            command.split(), stdout=subprocess.PIPE, check=False
        )

        if reindex_process.returncode > 0:
            LOG.error(
                "An error occurred, when reindexing Gerrit indices. Exit code: %d",
                reindex_process.returncode,
            )
            sys.exit(1)

        LOG.info("Finished reindexing.")

    def start(self, is_forced):
        """Reindex as required: fully when forced or unconfigured, else partially."""
        if is_forced:
            self.reindex()
            return

        if not self.configured_indices:
            LOG.info("gerrit_index.config does not exist. Creating all indices.")
            self.reindex()
            return

        # Partial reindex of indices that are not ready; execution then falls
        # through to the offline-upgrade check below (original flow preserved,
        # including the final "Skipping" log line).
        not_ready_indices = self._get_not_ready_indices()
        if not_ready_indices:
            self.reindex(not_ready_indices)

        if not self.is_online_reindex and self._indexes_need_update():
            LOG.info("Not all indices are up-to-date.")
            self.reindex()
            return

        LOG.info("Skipping reindexing.")
+
+
class GerritLuceneReindexer(GerritAbstractReindexer):
    """Reindexer for the (default) Lucene index backend."""

    def _get_indices(self):
        """Return {name: version} parsed from on-disk index directory names."""
        index_dir = os.path.join(self.gerrit_site_path, "index")
        lucene_indices = {}
        # Guard against a missing index directory instead of letting
        # os.listdir() raise (previously list.remove() also raised ValueError
        # when gerrit_index.config was absent).
        if not os.path.isdir(index_dir):
            return lucene_indices
        for entry in os.listdir(index_dir):
            if entry == "gerrit_index.config":
                continue
            try:
                (name, version) = entry.split("_")
                if name in lucene_indices:
                    # NOTE(review): string max() assumes zero-padded versions.
                    lucene_indices[name] = max(version, lucene_indices[name])
                else:
                    lucene_indices[name] = version
            except ValueError:
                LOG.debug("Ignoring invalid file in index-directory: %s", entry)
        return lucene_indices
+
+
class GerritElasticSearchReindexer(GerritAbstractReindexer):
    """Reindexer for an ElasticSearch index backend."""

    def _get_elasticsearch_config(self):
        """Read ES prefix and server URL from the site's gerrit.config."""
        es_config = {}
        gerrit_config = git.GitConfigParser(
            os.path.join(self.gerrit_site_path, "etc", "gerrit.config")
        )
        es_config["prefix"] = gerrit_config.get(
            "elasticsearch.prefix", default=""
        ).lower()
        es_config["server"] = gerrit_config.get(
            "elasticsearch.server", default=""
        ).lower()
        return es_config

    def _get_indices(self):
        """Return {name: version} by querying the ES server for gerrit indices."""
        es_config = self._get_elasticsearch_config()
        url = f"{es_config['server']}/{es_config['prefix']}*"
        # A timeout keeps the init job from hanging forever on an unreachable
        # ElasticSearch server (previously no timeout was set).
        try:
            response = requests.get(url, timeout=60)
        except requests.exceptions.SSLError:
            # Retry with the configured CA bundle for self-signed endpoints.
            response = requests.get(
                url, verify=self.init_config.ca_cert_path, timeout=60
            )

        es_indices = {}
        for index in response.json():
            try:
                index = index.replace(es_config["prefix"], "", 1)
                (name, version) = index.split("_")
                es_indices[name] = version
            except ValueError:
                LOG.debug("Found unknown index: %s", index)

        return es_indices
+
+
def get_reindexer(gerrit_site_path, config):
    """Factory returning the reindexer matching ``index.type`` in gerrit.config.

    Raises KeyError for an unknown index type name (via the enum lookup) and
    RuntimeError should a known IndexType ever lack a reindexer mapping.
    """
    gerrit_config = git.GitConfigParser(
        os.path.join(gerrit_site_path, "etc", "gerrit.config")
    )
    index_type = gerrit_config.get("index.type", default=IndexType.LUCENE.name)

    reindexer_cls = {
        IndexType.LUCENE: GerritLuceneReindexer,
        IndexType.ELASTICSEARCH: GerritElasticSearchReindexer,
    }.get(IndexType[index_type.upper()])

    if reindexer_cls is None:
        raise RuntimeError(f"Unknown index type {index_type}.")

    return reindexer_cls(gerrit_site_path, config)
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/validate_notedb.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/validate_notedb.py
new file mode 100644
index 0000000..aff9ce6
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/initializer/tasks/validate_notedb.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python3
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import time
+
+from ..helpers import log
+
+LOG = log.get_logger("init")
+
+
class NoteDbValidator:
    """Verifies that the NoteDB repositories of a Gerrit site are complete."""

    def __init__(self, site):
        self.site = site

        # Repositories and the refs each one must contain before Gerrit may
        # be started against this site.
        self.notedb_repos = ["All-Projects.git", "All-Users.git"]
        self.required_refs = {
            "All-Projects.git": ["refs/meta/config", "refs/meta/version"],
            "All-Users.git": ["refs/meta/config"],
        }

    def _test_repo_exists(self, repo):
        """Whether the bare repository directory exists under <site>/git."""
        repo_path = os.path.join(self.site, "git", repo)
        return os.path.exists(repo_path)

    def _test_ref_exists(self, repo, ref):
        """Whether ``ref`` resolves in ``repo`` (git rev-parse --verify)."""
        command = f"git --git-dir {self.site}/git/{repo} rev-parse --verify {ref}"
        result = subprocess.run(
            command.split(),
            stdout=subprocess.PIPE,
            universal_newlines=True,
            check=False,
        )
        return result.returncode == 0

    def wait_until_valid(self):
        """Block (polling once per second) until all repos and refs exist."""
        for repo in self.notedb_repos:
            LOG.info("Waiting for repository %s.", repo)
            while not self._test_repo_exists(repo):
                time.sleep(1)
            LOG.info("Found %s.", repo)

            for ref in self.required_refs[repo]:
                LOG.info("Waiting for ref %s in repository %s.", ref, repo)
                while not self._test_ref_exists(repo, ref):
                    time.sleep(1)
                LOG.info("Found ref %s in repo %s.", ref, repo)

    def check(self):
        """Non-blocking validation; returns False on the first missing item."""
        for repo in self.notedb_repos:
            if not self._test_repo_exists(repo):
                LOG.info("Repository %s is missing.", repo)
                return False
            LOG.info("Found %s.", repo)

            for ref in self.required_refs[repo]:
                if not self._test_ref_exists(repo, ref):
                    LOG.info("Ref %s in repository %s is missing.", ref, repo)
                    return False
                LOG.info("Found ref %s in repo %s.", ref, repo)
        return True
diff --git a/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/main.py b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/main.py
new file mode 100755
index 0000000..b41cf3a
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit-init/tools/gerrit-initializer/main.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+
+from initializer.tasks import download_plugins, init, reindex, validate_notedb
+from initializer.config.init_config import InitConfig
+
+
+def _run_download_plugins(args):
+ config = InitConfig().parse(args.config)
+ download_plugins.get_installer(args.site, config).execute()
+
+
+def _run_init(args):
+ config = InitConfig().parse(args.config)
+ init.GerritInit(args.site, config).execute()
+
+
+def _run_reindex(args):
+ config = InitConfig().parse(args.config)
+ reindex.get_reindexer(args.site, config).start(args.force)
+
+
+def _run_validate_notedb(args):
+ validate_notedb.NoteDbValidator(args.site).wait_until_valid()
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-s",
+ "--site",
+ help="Path to Gerrit site",
+ dest="site",
+ action="store",
+ default="/var/gerrit",
+ required=True,
+ )
+ parser.add_argument(
+ "-c",
+ "--config",
+ help="Path to configuration file for init process.",
+ dest="config",
+ action="store",
+ required=True,
+ )
+
+ subparsers = parser.add_subparsers()
+
+ parser_download_plugins = subparsers.add_parser(
+ "download-plugins", help="Download plugins"
+ )
+ parser_download_plugins.set_defaults(func=_run_download_plugins)
+
+ parser_init = subparsers.add_parser("init", help="Initialize Gerrit site")
+ parser_init.set_defaults(func=_run_init)
+
+ parser_reindex = subparsers.add_parser("reindex", help="Reindex Gerrit indexes")
+ parser_reindex.add_argument(
+ "-f",
+ "--force",
+ help="Reindex even if indices are ready.",
+ dest="force",
+ action="store_true",
+ )
+ parser_reindex.set_defaults(func=_run_reindex)
+
+ parser_validate_notedb = subparsers.add_parser(
+ "validate-notedb", help="Validate NoteDB"
+ )
+ parser_validate_notedb.set_defaults(func=_run_validate_notedb)
+
+ args = parser.parse_args()
+ args.func(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/charts/k8s-gerrit/container-images/gerrit/Dockerfile b/charts/k8s-gerrit/container-images/gerrit/Dockerfile
new file mode 100644
index 0000000..6a2eebc
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit/Dockerfile
@@ -0,0 +1,4 @@
+ARG TAG=latest
+FROM gerrit-base:${TAG}
+
+COPY tools/* /var/tools/
diff --git a/charts/k8s-gerrit/container-images/gerrit/README.md b/charts/k8s-gerrit/container-images/gerrit/README.md
new file mode 100644
index 0000000..2cd1b7a
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit/README.md
@@ -0,0 +1,13 @@
+# Gerrit image
+
+Container image for a Gerrit instance
+
+## Content
+
+* the [gerrit-base](../gerrit-base/README.md) image
+* `/var/tools/start`: start script
+
+## Start
+
+* starts Gerrit via start script `/var/tools/start` either as primary or replica
+ depending on the provided `gerrit.config`
diff --git a/charts/k8s-gerrit/container-images/gerrit/tools/start b/charts/k8s-gerrit/container-images/gerrit/tools/start
new file mode 100755
index 0000000..2073181
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/gerrit/tools/start
@@ -0,0 +1,13 @@
+#!/bin/ash
+GERRIT_DAEMON_OPTS="--console-log --enable-httpd"
+
+IS_REPLICA=$(git config -f /var/gerrit/etc/gerrit.config --get container.replica)
+if [[ "$IS_REPLICA" == "true" ]]; then
+ GERRIT_DAEMON_OPTS="$GERRIT_DAEMON_OPTS --replica"
+fi
+
+JAVA_OPTIONS=$(git config --file /var/gerrit/etc/gerrit.config --get-all container.javaOptions)
+JAVA_OPTIONS="$JAVA_OPTIONS -Dgerrit.instanceId=$POD_NAME"
+java ${JAVA_OPTIONS} -jar /var/gerrit/bin/gerrit.war daemon \
+ -d /var/gerrit \
+ $GERRIT_DAEMON_OPTS
diff --git a/charts/k8s-gerrit/container-images/git-gc/Dockerfile b/charts/k8s-gerrit/container-images/git-gc/Dockerfile
new file mode 100644
index 0000000..2293b12
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/git-gc/Dockerfile
@@ -0,0 +1,13 @@
+ARG TAG=latest
+FROM base:${TAG}
+
+COPY tools/* /var/tools/
+
+RUN mkdir -p /var/log/git && \
+ chown gerrit:users /var/log/git
+
+USER gerrit
+
+VOLUME ["/var/gerrit/git"]
+
+ENTRYPOINT ["/var/tools/gc.sh"]
diff --git a/charts/k8s-gerrit/container-images/git-gc/README.md b/charts/k8s-gerrit/container-images/git-gc/README.md
new file mode 100644
index 0000000..2509f99
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/git-gc/README.md
@@ -0,0 +1,19 @@
+# Git GC container image
+
+Container for running `git gc`. It is meant to run as a CronJob, when used in
+Kubernetes. It can also be used to run garbage collection on-demand, e.g. using
+a Kubernetes Job.
+
+## Content
+
+* base image
+* `gc.sh`: gc-script
+
+## Setup and configuration
+
+* copy tools scripts
+* ensure filesystem permissions
+
+## Start
+
+* execution of the provided `gc.sh`
diff --git a/charts/k8s-gerrit/container-images/git-gc/tools/gc.sh b/charts/k8s-gerrit/container-images/git-gc/tools/gc.sh
new file mode 100755
index 0000000..e3ad4e0
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/git-gc/tools/gc.sh
@@ -0,0 +1,220 @@
+#!/bin/ash
+# Copyright (C) 2011, 2020 SAP SE
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage()
+{
+ echo "Usage: $0 [ -s ProjectName ] [ -p ProjectName ] [ -b ProjectName ]"
+ echo "-s ProjectName : skip this project"
+ echo "-p ProjectName : run git-gc for this project"
+ echo "-b ProjectName : do not write bitmaps for this project"
+ echo ""
+ echo "By default the script will run git-gc for all projects unless \"-p\" option is provided"
+ echo
+ echo "Examples:"
+ echo " Run git-gc for all projects but skip foo and bar/baz projects"
+ echo " $0 -s foo -s bar/baz"
+ echo " Run git-gc only for foo and bar/baz projects"
+ echo " $0 -p foo -p bar/baz"
+ echo " Run git-gc only for bar project without writing bitmaps"
+ echo " $0 -p bar -b bar"
+ echo
+ echo "To specify a one-time --aggressive git gc for a repository X, simply"
+ echo "create an empty file called 'gc-aggressive-once' in the \$SITE/git/X.git"
+ echo "folder:"
+ echo
+ echo " \$ cd \$SITE/git/X.git"
+ echo " \$ touch gc-aggressive-once"
+ echo
+ echo "On the next run, gc.sh will use --aggressive option for gc-ing this"
+ echo "repository *and* will remove this file. Next time, gc.sh again runs"
+ echo "normal gc for this repository."
+ echo
+ echo "To specify a permanent --aggressive git gc for a repository, create"
+ echo "an empty file named 'gc-aggressive' in the same folder:"
+ echo
+ echo " \$ cd \$SITE/git/X.git"
+ echo " \$ touch gc-aggressive"
+ echo
+ echo "Every next git gc on this repository will use --aggressive option."
+ exit 2
+}
+
+gc_options()
+{
+ if test -f "$1/gc-aggressive" ; then
+ echo "--aggressive"
+ elif test -f "$1/gc-aggressive-once" ; then
+ echo "--aggressive"
+ rm -f "$1/gc-aggressive-once"
+ else
+ echo ""
+ fi
+}
+
+log_opts()
+{
+ if test -z $1 ; then
+ echo ""
+ else
+ echo " [$1]"
+ fi
+}
+
+log()
+{
+ # Rotate the $LOG if current date is different from the last modification of $LOG
+ if test -f "$LOG" ; then
+ TODAY=$(date +%Y-%m-%d)
+ LOG_LAST_MODIFIED=$(date +%Y-%m-%d -r $LOG)
+ if test "$TODAY" != "$LOG_LAST_MODIFIED" ; then
+ mv "$LOG" "$LOG.$LOG_LAST_MODIFIED"
+ gzip "$LOG.$LOG_LAST_MODIFIED"
+ fi
+ fi
+
+ # Do not log an empty line
+ if [[ ! "$1" =~ ^[[:space:]]*$ ]]; then
+ echo $1
+ echo $1 >>$LOG
+ fi
+}
+
+gc_all_projects()
+{
+ find $TOP -type d -path "*.git" -prune -o -name "*.git" | while IFS= read d
+ do
+ gc_project "${d#$TOP/}"
+ done
+}
+
+gc_specified_projects()
+{
+ for PROJECT_NAME in ${GC_PROJECTS}
+ do
+ gc_project "$PROJECT_NAME"
+ done
+}
+
+gc_project()
+{
+ PROJECT_NAME="$@"
+ PROJECT_DIR="$TOP/$PROJECT_NAME"
+
+ if [[ ! -d "$PROJECT_DIR" ]]; then
+ OUT=$(date +"%D %r Failed: Directory does not exist: $PROJECT_DIR") && log "$OUT"
+ return 1
+ fi
+
+ OPTS=$(gc_options "$PROJECT_DIR")
+ LOG_OPTS=$(log_opts $OPTS)
+
+ # Check if git-gc for this project has to be skipped
+ if [ $SKIP_PROJECTS_OPT -eq 1 ]; then
+ for SKIP_PROJECT in "${SKIP_PROJECTS}"; do
+ if [ "$SKIP_PROJECT" == "$PROJECT_NAME" ] ; then
+ OUT=$(date +"%D %r Skipped: $PROJECT_NAME") && log "$OUT"
+ return 0
+ fi
+ done
+ fi
+
+ # Check if writing bitmaps for this project has to be disabled
+ WRITEBITMAPS='true';
+ if [ $DONOT_WRITE_BITMAPS_OPT -eq 1 ]; then
+ for BITMAP_PROJECT in "${DONOT_WRITE_BITMAPS}"; do
+ if [ "$BITMAP_PROJECT" == "$PROJECT_NAME" ] ; then
+ WRITEBITMAPS='false';
+ fi
+ done
+ fi
+
+ OUT=$(date +"%D %r Started: $PROJECT_NAME$LOG_OPTS") && log "$OUT"
+
+ git --git-dir="$PROJECT_DIR" config core.logallrefupdates true
+
+ git --git-dir="$PROJECT_DIR" config repack.usedeltabaseoffset true
+ git --git-dir="$PROJECT_DIR" config repack.writebitmaps $WRITEBITMAPS
+ git --git-dir="$PROJECT_DIR" config pack.compression 9
+ git --git-dir="$PROJECT_DIR" config pack.indexversion 2
+
+ git --git-dir="$PROJECT_DIR" config gc.autodetach false
+ git --git-dir="$PROJECT_DIR" config gc.auto 0
+ git --git-dir="$PROJECT_DIR" config gc.autopacklimit 0
+ git --git-dir="$PROJECT_DIR" config gc.packrefs true
+ git --git-dir="$PROJECT_DIR" config gc.reflogexpire never
+ git --git-dir="$PROJECT_DIR" config gc.reflogexpireunreachable never
+ git --git-dir="$PROJECT_DIR" config receive.autogc false
+
+ git --git-dir="$PROJECT_DIR" config pack.window 250
+ git --git-dir="$PROJECT_DIR" config pack.depth 50
+
+ OUT=$(git -c gc.auto=6700 -c gc.autoPackLimit=4 --git-dir="$PROJECT_DIR" gc --auto --prune $OPTS || date +"%D %r Failed: $PROJECT_NAME") \
+ && log "$OUT"
+
+ (find "$PROJECT_DIR/refs/changes" -type d | xargs rmdir;
+ find "$PROJECT_DIR/refs/changes" -type d | xargs rmdir
+ ) 2>/dev/null
+
+ OUT=$(date +"%D %r Finished: $PROJECT_NAME$LOG_OPTS") && log "$OUT"
+}
+
+###########################
+# Main script starts here #
+###########################
+
+SKIP_PROJECTS=
+GC_PROJECTS=
+DONOT_WRITE_BITMAPS=
+SKIP_PROJECTS_OPT=0
+GC_PROJECTS_OPT=0
+DONOT_WRITE_BITMAPS_OPT=0
+
+while getopts 's:p:b:?h' c
+do
+ case $c in
+ s)
+ SKIP_PROJECTS="${SKIP_PROJECTS} ${OPTARG}.git"
+ SKIP_PROJECTS_OPT=1
+ ;;
+ p)
+ GC_PROJECTS="${GC_PROJECTS} ${OPTARG}.git"
+ GC_PROJECTS_OPT=1
+ ;;
+ b)
+ DONOT_WRITE_BITMAPS="${DONOT_WRITE_BITMAPS} ${OPTARG}.git"
+ DONOT_WRITE_BITMAPS_OPT=1
+ ;;
+ h|?)
+ usage
+ ;;
+ esac
+done
+
+test $# -eq 0 || usage
+
+TOP=/var/gerrit/git
+LOG=/var/log/git/gc.log
+
+OUT=$(date +"%D %r Started") && log "$OUT"
+
+if [ $GC_PROJECTS_OPT -eq 1 ]; then
+ gc_specified_projects
+else
+ gc_all_projects
+fi
+
+OUT=$(date +"%D %r Finished") && log "$OUT"
+
+exit 0
diff --git a/charts/k8s-gerrit/container-images/publish_list b/charts/k8s-gerrit/container-images/publish_list
new file mode 100644
index 0000000..9c8ad64
--- /dev/null
+++ b/charts/k8s-gerrit/container-images/publish_list
@@ -0,0 +1,6 @@
+get_image_list(){
+ echo "apache-git-http-backend" \
+ "gerrit-init" \
+ "gerrit" \
+ "git-gc"
+}
diff --git a/charts/k8s-gerrit/get_version.sh b/charts/k8s-gerrit/get_version.sh
new file mode 100755
index 0000000..df2dc0f
--- /dev/null
+++ b/charts/k8s-gerrit/get_version.sh
@@ -0,0 +1,5 @@
+REV=$(git describe --always --dirty)
+GERRIT_VERSION=$(docker run --platform=linux/amd64 --entrypoint "/bin/sh" gerrit-base:$REV \
+ -c "java -jar /var/gerrit/bin/gerrit.war version")
+GERRIT_VERSION=$(echo "${GERRIT_VERSION##*$'\n'}" | cut -d' ' -f3 | tr -d '[:space:]')
+echo "$REV-$GERRIT_VERSION"
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/.helmignore b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/Chart.yaml b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/Chart.yaml
new file mode 100644
index 0000000..9aa020c
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/Chart.yaml
@@ -0,0 +1,10 @@
+apiVersion: v2
+name: gerrit-operator-crds
+description: |
+ This helm chart installs CRDs that are managed/referenced by the gerrit
+ operator; namely - GerritCluster, Gerrit, GitGarbageCollection, Receiver. This
+ chart needs to be updated whenever there is a change in the operator source
+ code that updates the CRDs generated by fabric8.
+sources:
+- https://gerrit.googlesource.com/k8s-gerrit/+/refs/heads/master/operator
+version: 0.1.0
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerritclusters.gerritoperator.google.com-v1.yml b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerritclusters.gerritoperator.google.com-v1.yml
new file mode 100644
index 0000000..0fa15b7
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerritclusters.gerritoperator.google.com-v1.yml
@@ -0,0 +1,1494 @@
+# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: gerritclusters.gerritoperator.google.com
+spec:
+ group: gerritoperator.google.com
+ names:
+ kind: GerritCluster
+ plural: gerritclusters
+ shortNames:
+ - gclus
+ singular: gerritcluster
+ scope: Namespaced
+ versions:
+ - name: v1alpha17
+ schema:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ storage:
+ properties:
+ pluginCache:
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ storageClasses:
+ properties:
+ readWriteOnce:
+ type: string
+ readWriteMany:
+ type: string
+ nfsWorkaround:
+ properties:
+ enabled:
+ type: boolean
+ chownOnStartup:
+ type: boolean
+ idmapdConfig:
+ type: string
+ type: object
+ type: object
+ sharedStorage:
+ properties:
+ externalPVC:
+ properties:
+ enabled:
+ type: boolean
+ claimName:
+ type: string
+ type: object
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ volumeName:
+ type: string
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: object
+ containerImages:
+ properties:
+ imagePullPolicy:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ busyBox:
+ properties:
+ registry:
+ type: string
+ tag:
+ type: string
+ type: object
+ gerritImages:
+ properties:
+ registry:
+ type: string
+ org:
+ type: string
+ tag:
+ type: string
+ type: object
+ type: object
+ ingress:
+ properties:
+ enabled:
+ type: boolean
+ host:
+ type: string
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ tls:
+ properties:
+ enabled:
+ type: boolean
+ secret:
+ type: string
+ type: object
+ ssh:
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ ambassador:
+ properties:
+ id:
+ items:
+ type: string
+ type: array
+ createHost:
+ type: boolean
+ type: object
+ type: object
+ refdb:
+ properties:
+ database:
+ enum:
+ - NONE
+ - ZOOKEEPER
+ - SPANNER
+ type: string
+ zookeeper:
+ properties:
+ connectString:
+ type: string
+ rootNode:
+ type: string
+ type: object
+ spanner:
+ properties:
+ projectName:
+ type: string
+ instance:
+ type: string
+ database:
+ type: string
+ type: object
+ type: object
+ serverId:
+ type: string
+ gerrits:
+ items:
+ properties:
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ serviceAccount:
+ type: string
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ matchLabelKeys:
+ items:
+ type: string
+ type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ type: object
+ type: array
+ priorityClassName:
+ type: string
+ replicas:
+ type: integer
+ updatePartition:
+ type: integer
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ gracefulStopTimeout:
+ type: integer
+ service:
+ properties:
+ sshPort:
+ type: integer
+ type:
+ type: string
+ httpPort:
+ type: integer
+ type: object
+ site:
+ properties:
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ plugins:
+ items:
+ properties:
+ installAsLibrary:
+ type: boolean
+ name:
+ type: string
+ url:
+ type: string
+ sha1:
+ type: string
+ type: object
+ type: array
+ libs:
+ items:
+ properties:
+ name:
+ type: string
+ url:
+ type: string
+ sha1:
+ type: string
+ type: object
+ type: array
+ configFiles:
+ additionalProperties:
+ type: string
+ type: object
+ secretRef:
+ type: string
+ mode:
+ enum:
+ - PRIMARY
+ - REPLICA
+ type: string
+ debug:
+ properties:
+ enabled:
+ type: boolean
+ suspend:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ receiver:
+ properties:
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ matchLabelKeys:
+ items:
+ type: string
+ type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ type: object
+ type: array
+ priorityClassName:
+ type: string
+ replicas:
+ type: integer
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ service:
+ properties:
+ type:
+ type: string
+ httpPort:
+ type: integer
+ type: object
+ credentialSecretRef:
+ type: string
+ type: object
+ type: object
+ type: object
+ status:
+ properties:
+ members:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerritnetworks.gerritoperator.google.com-v1.yml b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerritnetworks.gerritoperator.google.com-v1.yml
new file mode 100644
index 0000000..77ac756
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerritnetworks.gerritoperator.google.com-v1.yml
@@ -0,0 +1,134 @@
+# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: gerritnetworks.gerritoperator.google.com
+spec:
+ group: gerritoperator.google.com
+ names:
+ kind: GerritNetwork
+ plural: gerritnetworks
+ shortNames:
+ - gn
+ singular: gerritnetwork
+ scope: Namespaced
+ versions:
+ - name: v1alpha2
+ schema:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ ingress:
+ properties:
+ enabled:
+ type: boolean
+ host:
+ type: string
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ tls:
+ properties:
+ enabled:
+ type: boolean
+ secret:
+ type: string
+ type: object
+ ssh:
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ ambassador:
+ properties:
+ id:
+ items:
+ type: string
+ type: array
+ createHost:
+ type: boolean
+ type: object
+ type: object
+ receiver:
+ properties:
+ name:
+ type: string
+ httpPort:
+ type: integer
+ type: object
+ primaryGerrit:
+ properties:
+ sshPort:
+ type: integer
+ name:
+ type: string
+ httpPort:
+ type: integer
+ type: object
+ gerritReplica:
+ properties:
+ sshPort:
+ type: integer
+ name:
+ type: string
+ httpPort:
+ type: integer
+ type: object
+ type: object
+ status:
+ properties:
+ apiVersion:
+ type: string
+ code:
+ type: integer
+ details:
+ properties:
+ causes:
+ items:
+ properties:
+ field:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: array
+ group:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ retryAfterSeconds:
+ type: integer
+ uid:
+ type: string
+ type: object
+ kind:
+ type: string
+ message:
+ type: string
+ metadata:
+ properties:
+ continue:
+ type: string
+ remainingItemCount:
+ type: integer
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ type: object
+ reason:
+ type: string
+ status:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerrits.gerritoperator.google.com-v1.yml b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerrits.gerritoperator.google.com-v1.yml
new file mode 100644
index 0000000..a029288
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gerrits.gerritoperator.google.com-v1.yml
@@ -0,0 +1,801 @@
+# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: gerrits.gerritoperator.google.com
+spec:
+ group: gerritoperator.google.com
+ names:
+ kind: Gerrit
+ plural: gerrits
+ shortNames:
+ - gcr
+ singular: gerrit
+ scope: Namespaced
+ versions:
+ - name: v1alpha17
+ schema:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ containerImages:
+ properties:
+ imagePullPolicy:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ busyBox:
+ properties:
+ registry:
+ type: string
+ tag:
+ type: string
+ type: object
+ gerritImages:
+ properties:
+ registry:
+ type: string
+ org:
+ type: string
+ tag:
+ type: string
+ type: object
+ type: object
+ storage:
+ properties:
+ pluginCache:
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ storageClasses:
+ properties:
+ readWriteOnce:
+ type: string
+ readWriteMany:
+ type: string
+ nfsWorkaround:
+ properties:
+ enabled:
+ type: boolean
+ chownOnStartup:
+ type: boolean
+ idmapdConfig:
+ type: string
+ type: object
+ type: object
+ sharedStorage:
+ properties:
+ externalPVC:
+ properties:
+ enabled:
+ type: boolean
+ claimName:
+ type: string
+ type: object
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ volumeName:
+ type: string
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: object
+ ingress:
+ properties:
+ enabled:
+ type: boolean
+ host:
+ type: string
+ tlsEnabled:
+ type: boolean
+ ssh:
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ type: object
+ refdb:
+ properties:
+ database:
+ enum:
+ - NONE
+ - ZOOKEEPER
+ - SPANNER
+ type: string
+ zookeeper:
+ properties:
+ connectString:
+ type: string
+ rootNode:
+ type: string
+ type: object
+ spanner:
+ properties:
+ projectName:
+ type: string
+ instance:
+ type: string
+ database:
+ type: string
+ type: object
+ type: object
+ serverId:
+ type: string
+ serviceAccount:
+ type: string
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ matchLabelKeys:
+ items:
+ type: string
+ type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ type: object
+ type: array
+ priorityClassName:
+ type: string
+ replicas:
+ type: integer
+ updatePartition:
+ type: integer
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ gracefulStopTimeout:
+ type: integer
+ service:
+ properties:
+ sshPort:
+ type: integer
+ type:
+ type: string
+ httpPort:
+ type: integer
+ type: object
+ site:
+ properties:
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ plugins:
+ items:
+ properties:
+ installAsLibrary:
+ type: boolean
+ name:
+ type: string
+ url:
+ type: string
+ sha1:
+ type: string
+ type: object
+ type: array
+ libs:
+ items:
+ properties:
+ name:
+ type: string
+ url:
+ type: string
+ sha1:
+ type: string
+ type: object
+ type: array
+ configFiles:
+ additionalProperties:
+ type: string
+ type: object
+ secretRef:
+ type: string
+ mode:
+ enum:
+ - PRIMARY
+ - REPLICA
+ type: string
+ debug:
+ properties:
+ enabled:
+ type: boolean
+ suspend:
+ type: boolean
+ type: object
+ type: object
+ status:
+ properties:
+ ready:
+ type: boolean
+ appliedConfigMapVersions:
+ additionalProperties:
+ type: string
+ type: object
+ appliedSecretVersions:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gitgcs.gerritoperator.google.com-v1.yml b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gitgcs.gerritoperator.google.com-v1.yml
new file mode 100644
index 0000000..7974e13
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/gitgcs.gerritoperator.google.com-v1.yml
@@ -0,0 +1,386 @@
+# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: gitgcs.gerritoperator.google.com
+spec:
+ group: gerritoperator.google.com
+ names:
+ kind: GitGarbageCollection
+ plural: gitgcs
+ shortNames:
+ - gitgc
+ singular: gitgarbagecollection
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ cluster:
+ type: string
+ schedule:
+ type: string
+ projects:
+ items:
+ type: string
+ type: array
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ type: object
+ status:
+ properties:
+ replicateAll:
+ type: boolean
+ excludedProjects:
+ items:
+ type: string
+ type: array
+ state:
+ enum:
+ - ACTIVE
+ - INACTIVE
+ - CONFLICT
+ - ERROR
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/receivers.gerritoperator.google.com-v1.yml b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/receivers.gerritoperator.google.com-v1.yml
new file mode 100644
index 0000000..c314670
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/templates/receivers.gerritoperator.google.com-v1.yml
@@ -0,0 +1,655 @@
+# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: receivers.gerritoperator.google.com
+spec:
+ group: gerritoperator.google.com
+ names:
+ kind: Receiver
+ plural: receivers
+ shortNames:
+ - grec
+ singular: receiver
+ scope: Namespaced
+ versions:
+ - name: v1alpha6
+ schema:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ containerImages:
+ properties:
+ imagePullPolicy:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ busyBox:
+ properties:
+ registry:
+ type: string
+ tag:
+ type: string
+ type: object
+ gerritImages:
+ properties:
+ registry:
+ type: string
+ org:
+ type: string
+ tag:
+ type: string
+ type: object
+ type: object
+ storage:
+ properties:
+ storageClasses:
+ properties:
+ readWriteOnce:
+ type: string
+ readWriteMany:
+ type: string
+ nfsWorkaround:
+ properties:
+ enabled:
+ type: boolean
+ chownOnStartup:
+ type: boolean
+ idmapdConfig:
+ type: string
+ type: object
+ type: object
+ sharedStorage:
+ properties:
+ externalPVC:
+ properties:
+ enabled:
+ type: boolean
+ claimName:
+ type: string
+ type: object
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ volumeName:
+ type: string
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: object
+ ingress:
+ properties:
+ enabled:
+ type: boolean
+ host:
+ type: string
+ tlsEnabled:
+ type: boolean
+ ssh:
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ type: object
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ matchLabelKeys:
+ items:
+ type: string
+ type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ type: object
+ type: array
+ priorityClassName:
+ type: string
+ replicas:
+ type: integer
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ service:
+ properties:
+ type:
+ type: string
+ httpPort:
+ type: integer
+ type: object
+ credentialSecretRef:
+ type: string
+ type: object
+ status:
+ properties:
+ ready:
+ type: boolean
+ appliedCredentialSecretVersion:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/values.yaml b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/values.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator-crds/values.yaml
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator/.helmignore b/charts/k8s-gerrit/helm-charts/gerrit-operator/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator/Chart.yaml b/charts/k8s-gerrit/helm-charts/gerrit-operator/Chart.yaml
new file mode 100644
index 0000000..21ce467
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v2
+name: gerrit-operator
+description: |
+ This helm chart creates a Deployment for the gerrit-operator. A corresponding
+ Service for the operator is implicitly created.
+sources:
+- https://gerrit.googlesource.com/k8s-gerrit/+/refs/heads/master/operator
+version: 0.1.0
+dependencies:
+- name: gerrit-operator-crds
+ version: 0.1.0
+ repository: "file://../gerrit-operator-crds"
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator/templates/operator.yaml b/charts/k8s-gerrit/helm-charts/gerrit-operator/templates/operator.yaml
new file mode 100644
index 0000000..f9ed84f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator/templates/operator.yaml
@@ -0,0 +1,71 @@
+{{- if .Values.externalKeyStore.enabled }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: gerrit-operator-ssl
+ namespace: {{ .Release.Namespace }}
+data:
+ keystore.jks: {{ .Values.externalKeyStore.jks }}
+ keystore.password: {{ .Values.externalKeyStore.password | b64enc }}
+type: Opaque
+{{- end }}
+
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: gerrit-operator
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: gerrit-operator
+ template:
+ metadata:
+ labels:
+ app: gerrit-operator
+ spec:
+ serviceAccountName: gerrit-operator
+ {{- with .Values.image.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: operator
+ image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.org }}/{{ .Values.image.name }}:{{ .Values.image.tag | default "latest" }}
+ imagePullPolicy: {{ .Values.image.imagePullPolicy }}
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: INGRESS
+ value: {{ .Values.ingress.type }}
+ ports:
+ - containerPort: 80
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTPS
+ initialDelaySeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ {{- if .Values.externalKeyStore.enabled }}
+ volumeMounts:
+ - name: ssl
+ readOnly: true
+ mountPath: /operator
+ {{- end }}
+ {{- if .Values.externalKeyStore.enabled }}
+ volumes:
+ - name: ssl
+ secret:
+ secretName: gerrit-operator-ssl
+ {{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator/templates/rbac.yaml b/charts/k8s-gerrit/helm-charts/gerrit-operator/templates/rbac.yaml
new file mode 100644
index 0000000..fbd2ae7
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator/templates/rbac.yaml
@@ -0,0 +1,87 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: gerrit-operator
+ namespace: {{ .Release.Namespace }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: gerrit-operator-admin
+subjects:
+- kind: ServiceAccount
+ name: gerrit-operator
+ namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: gerrit-operator
+  apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: gerrit-operator
+rules:
+- apiGroups:
+ - "batch"
+ resources:
+ - cronjobs
+ verbs:
+ - '*'
+- apiGroups:
+ - "apps"
+ resources:
+ - statefulsets
+ - deployments
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - persistentvolumeclaims
+ - secrets
+ - services
+ verbs:
+ - '*'
+- apiGroups:
+ - "storage.k8s.io"
+ resources:
+ - storageclasses
+ verbs:
+ - 'get'
+ - 'list'
+- apiGroups:
+ - "apiextensions.k8s.io"
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - '*'
+- apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - ingresses
+ verbs:
+ - '*'
+- apiGroups:
+ - "gerritoperator.google.com"
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - "networking.istio.io"
+ resources:
+ - "gateways"
+ - "virtualservices"
+ - "destinationrules"
+ verbs:
+ - '*'
+- apiGroups:
+ - "admissionregistration.k8s.io"
+ resources:
+ - 'validatingwebhookconfigurations'
+ verbs:
+ - '*'
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-operator/values.yaml b/charts/k8s-gerrit/helm-charts/gerrit-operator/values.yaml
new file mode 100644
index 0000000..ebb88af
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-operator/values.yaml
@@ -0,0 +1,21 @@
+image:
+ registry: docker.io
+ org: k8sgerrit
+ name: gerrit-operator
+ tag: latest
+ imagePullPolicy: Always
+ imagePullSecrets: []
+ # - name: my-secret-1
+
+ingress:
+ # Which ingress provider to use (options: NONE, INGRESS, ISTIO)
+ type: NONE
+
+## Required to use an external/persistent keystore, otherwise a keystore using
+## self-signed certificates will be generated
+externalKeyStore:
+ enabled: false
+ # base64-encoded Java keystore
+ jks: ""
+ # Java keystore password (not base64-encoded)
+ password: ""
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/.helmignore b/charts/k8s-gerrit/helm-charts/gerrit-replica/.helmignore
new file mode 100644
index 0000000..4f4562f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/.helmignore
@@ -0,0 +1,24 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+docs/
+supplements/
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/Chart.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/Chart.yaml
new file mode 100644
index 0000000..1f54472
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+appVersion: 3.8.0
+description: |-
+ The Gerrit replica serves as a read-only Gerrit instance to serve repositories
+ that it receives from a Gerrit instance via replication. It can be used to
+ reduce the load on Gerrit instances.
+name: gerrit-replica
+version: 0.2.0
+maintainers:
+- name: Thomas Draebing
+ email: thomas.draebing@sap.com
+- name: Matthias Sohn
+ email: matthias.sohn@sap.com
+- name: Sasa Zivkov
+ email: sasa.zivkov@sap.com
+- name: Christian Halstrick
+ email: christian.halstrick@sap.com
+home: https://gerrit.googlesource.com/k8s-gerrit/+/master/helm-charts/gerrit-replica
+icon: http://commondatastorage.googleapis.com/gerrit-static/diffy-w200.png
+sources:
+- https://gerrit.googlesource.com/k8s-gerrit/+/master
+keywords:
+- gerrit
+- git
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/LICENSE b/charts/k8s-gerrit/helm-charts/gerrit-replica/LICENSE
new file mode 100644
index 0000000..028fc9f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/README.md b/charts/k8s-gerrit/helm-charts/gerrit-replica/README.md
new file mode 100644
index 0000000..993f4d9
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/README.md
@@ -0,0 +1,546 @@
+# Gerrit replica on Kubernetes
+
+Gerrit is a web-based code review tool, which acts as a Git server. On large setups
+Gerrit servers can see a sizable amount of traffic from git operations performed by
+developers and build servers. The majority of requests are read-only requests
+(e.g. by `git fetch` operations). To take some load off the Gerrit server,
+Gerrit replicas can be deployed to serve read-only requests.
+
+This helm chart provides a Gerrit replica setup that can be deployed on Kubernetes.
+The Gerrit replica is capable of receiving replicated git repositories from a
+Gerrit. The Gerrit replica can then serve authenticated read-only requests.
+
+***note
+Gerrit versions before 3.0 are no longer supported, since the support of ReviewDB
+was removed.
+***
+
+## Prerequisites
+
+- Helm (>= version 3.0)
+
+ (Check out [this guide](https://docs.helm.sh/using_helm/#quickstart-guide)
+ how to install and use helm.)
+
+- Access to a provisioner for persistent volumes with `Read Write Many (RWM)`-
+ capability.
+
+  A list of applicable volume types can be found
+ [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).
+ This project was developed using the
+ [NFS-server-provisioner helm chart](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner),
+ a NFS-provisioner deployed in the Kubernetes cluster itself. Refer to
+ [this guide](/helm-charts/gerrit-replica/docs/nfs-provisioner.md) of how to
+ deploy it in context of this project.
+
+- A domain name that is configured to point to the IP address of the node running
+ the Ingress controller on the kubernetes cluster (as described
+ [here](http://alesnosek.com/blog/2017/02/14/accessing-kubernetes-pods-from-outside-of-the-cluster/)).
+
+- (Optional: Required, if SSL is configured)
+ A [Java keystore](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html#httpd.sslKeyStore)
+ to be used by Gerrit.
+
+## Installing the Chart
+
+***note
+**ATTENTION:** The value for `ingress.host` is required for rendering
+the chart's templates. The nature of the value does not allow defaults.
+Thus a custom `values.yaml`-file setting this value is required!
+***
+
+To install the chart with the release name `gerrit-replica`, execute:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit-replica \ # release name
+ ./gerrit-replica \ # path to chart
+ -f <path-to-custom-values>.yaml
+```
+
+The command deploys the Gerrit replica on the current Kubernetes cluster. The
+[configuration section](#Configuration) lists the parameters that can be
+configured during installation.
+
+The Gerrit replica requires the replicated `All-Projects.git`- and `All-Users.git`-
+repositories to be present in the `/var/gerrit/git`-directory. The `gerrit-init`-
+InitContainer will wait for this being the case. A way to do this is to access
+the Gerrit replica pod and to clone the repositories from the primary Gerrit (Make
+sure that you have the correct access rights to do so.):
+
+```sh
+kubectl exec -it <gerrit-replica-pod> -c gerrit-init ash
+gerrit@<gerrit-replica-pod>:/var/tools$ cd /var/gerrit/git
+gerrit@<gerrit-replica-pod>:/var/gerrit/git$ git clone "http://gerrit.com/All-Projects" --mirror
+Cloning into bare repository 'All-Projects.git'...
+gerrit@<gerrit-replica-pod>:/var/gerrit/git$ git clone "http://gerrit.com/All-Users" --mirror
+Cloning into bare repository 'All-Users.git'...
+```
+
+## Configuration
+
+The following sections list the configurable values in `values.yaml`. To configure
+a Gerrit replica setup, make a copy of the `values.yaml`-file and change the
+parameters as needed. The configuration can be applied by installing the chart as
+described [above](#Installing-the-chart).
+
+In addition, single options can be set without creating a custom `values.yaml`:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit-replica \ # release name
+ ./gerrit-replica \ # path to chart
+ --set=gitRepositoryStorage.size=100Gi,gitBackend.replicas=2
+```
+
+### Container images
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `images.busybox.registry` | The registry to pull the busybox container images from | `docker.io` |
+| `images.busybox.tag` | The busybox image tag to use | `latest` |
+| `images.registry.name` | The image registry to pull the container images from | `` |
+| `images.registry.ImagePullSecret.name` | Name of the ImagePullSecret | `image-pull-secret` (if empty no image pull secret will be deployed) |
+| `images.registry.ImagePullSecret.create` | Whether to create an ImagePullSecret | `false` |
+| `images.registry.ImagePullSecret.username` | The image registry username | `nil` |
+| `images.registry.ImagePullSecret.password` | The image registry password | `nil` |
+| `images.version` | The image version (image tag) to use | `latest` |
+| `images.imagePullPolicy` | Image pull policy | `Always` |
+| `images.additionalImagePullSecrets` | Additional image pull policies that pods should use | `[]` |
+
+### Labels
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `additionalLabels` | Additional labels for resources managed by this Helm chart | `{}` |
+
+### Storage classes
+
+For information of how a `StorageClass` is configured in Kubernetes, read the
+[official Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#introduction).
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `storageClasses.default.name` | The name of the default StorageClass (RWO) | `default` |
+| `storageClasses.default.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.default.provisioner` | Provisioner of the StorageClass | `kubernetes.io/aws-ebs` |
+| `storageClasses.default.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.default.parameters` | Parameters for the provisioner | `parameters.type: gp2`, `parameters.fsType: ext4` |
+| `storageClasses.default.mountOptions` | The mount options of the default StorageClass | `[]` |
+| `storageClasses.default.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+| `storageClasses.shared.name` | The name of the shared StorageClass (RWM) | `shared-storage` |
+| `storageClasses.shared.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.shared.provisioner` | Provisioner of the StorageClass | `nfs` |
+| `storageClasses.shared.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.shared.parameters` | Parameters for the provisioner | `parameters.mountOptions: vers=4.1` |
+| `storageClasses.shared.mountOptions` | The mount options of the shared StorageClass | `[]` |
+| `storageClasses.shared.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+
+### CA certificate
+
+Some applications may require TLS verification. If the default CA built into the
+containers is not enough a custom CA certificate can be given to the deployment.
+Note that Gerrit will require its CA in a JKS keystore, which is described below.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `caCert` | CA certificate for TLS verification (if not set, the default will be used) | `None` |
+
+### Workaround for NFS
+
+Kubernetes will not always be able to adapt the ownership of the files within NFS
+volumes. Thus, a workaround exists that will add init-containers to
+adapt file ownership. Note, that only the ownership of the root directory of the
+volume will be changed. All data contained within will be expected to already be
+owned by the user used by Gerrit. Also the ID-domain will be configured to ensure
+correct ID-mapping.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `nfsWorkaround.enabled` | Whether the volume used is an NFS-volume | `false` |
+| `nfsWorkaround.chownOnStartup` | Whether to chown the volume on pod startup | `false` |
+| `nfsWorkaround.idDomain` | The ID-domain that should be used to map user-/group-IDs for the NFS mount | `localdomain.com` |
+
+### Network policies
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `networkPolicies.enabled` | Whether to enable preconfigured NetworkPolicies | `false` |
+| `networkPolicies.dnsPorts` | List of ports used by DNS-service (e.g. KubeDNS) | `[53, 8053]` |
+
+The NetworkPolicies provided here are quite strict and do not account for all
+possible scenarios. Thus, custom NetworkPolicies have to be added, e.g. for
+connecting to a database. On the other hand some defaults may be not restrictive
+enough. By default, the ingress traffic of the git-backend pod is not restricted.
+Thus, every source (with the right credentials) could push to the git-backend.
+To add an additional layer of security, the ingress rule could be defined more
+finegrained. The chart provides the possibility to define custom rules for ingress-
+traffic of the git-backend pod under `gitBackend.networkPolicy.ingress`.
+Depending on the scenario, there are different ways to restrict the incoming
+connections.
+
+If the replicator (e.g. Gerrit) is running in a pod on the same cluster,
+a podSelector (and namespaceSelector, if the pod is running in a different
+namespace) can be used to whitelist the traffic:
+
+```yaml
+gitBackend:
+ networkPolicy:
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app: gerrit
+```
+
+If the replicator is outside the cluster, the IP of the replicator can also be
+whitelisted, e.g.:
+
+```yaml
+gitBackend:
+ networkPolicy:
+ ingress:
+ - from:
+ - ipBlock:
+ cidr: xxx.xxx.0.0/16
+```
+
+The same principle also applies to other use cases, e.g. connecting to a database.
+For more information about the NetworkPolicy resource refer to the
+[Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
+
+### Storage for Git repositories
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitRepositoryStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `gitRepositoryStorage.externalPVC.name` | Name of the external PVC | `git-repositories-pvc` |
+| `gitRepositoryStorage.size` | Size of the volume storing the Git repositories | `5Gi` |
+
+If the git repositories should be persisted even if the chart is deleted and in
+a way that the volume containing them can be mounted by the reinstalled chart,
+the PVC claiming the volume has to be created independently of the chart. To use
+the external PVC, set `gitRepositoryStorage.externalPVC.use` to `true` and
+give the name of the PVC under `gitRepositoryStorage.externalPVC.name`.
+
+### Storage for Logs
+
+In addition to collecting logs with a log collection tool like Promtail, the logs
+can also be stored in a persistent volume. This volume has to be a read-write-many
+volume to be able to be used by multiple pods.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `logStorage.enabled` | Whether to enable persistence of logs | `false` |
+| `logStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `logStorage.externalPVC.name` | Name of the external PVC | `gerrit-logs-pvc` |
+| `logStorage.size` | Size of the volume | `5Gi` |
+| `logStorage.cleanup.enabled` | Whether to regularly delete old logs | `false` |
+| `logStorage.cleanup.schedule` | Cron schedule defining when to run the cleanup job | `0 0 * * *` |
+| `logStorage.cleanup.retentionDays` | Number of days to retain the logs | `14` |
+| `logStorage.cleanup.resources` | Resources the container is allowed to use | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `logStorage.cleanup.additionalPodLabels` | Additional labels for pods | `{}` |
+
+Each pod will create a separate folder for its logs, allowing to trace logs to
+the respective pods.
+
+### Istio
+
+Istio can be used as an alternative to Kubernetes Ingresses to manage the traffic
+into the cluster and also inside the cluster. This requires istio to be installed
+beforehand. Some guidance on how to set up istio can be found [here](/Documentation/istio.md).
+The helm chart expects `istio-injection` to be enabled in the namespace, in which
+it will be installed.
+
+In the case istio is used, all configuration for ingresses in the chart will be
+ignored.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `istio.enabled` | Whether istio should be used (requires istio to be installed) | `false` |
+| `istio.host` | Hostname (CNAME must point to istio ingress gateway loadbalancer service) | `nil` |
+| `istio.tls.enabled` | Whether to enable TLS | `false` |
+| `istio.tls.secret.create` | Whether to create TLS certificate secret | `true` |
+| `istio.tls.secret.name` | Name of external secret containing TLS certificates | `nil` |
+| `istio.tls.cert` | TLS certificate | `-----BEGIN CERTIFICATE-----` |
+| `istio.tls.key` | TLS key | `-----BEGIN RSA PRIVATE KEY-----` |
+| `istio.ssh.enabled` | Whether to enable SSH | `false` |
+
+### Ingress
+
+As an alternative to istio the Nginx Ingress controller can be used to manage
+ingress traffic.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `ingress.enabled` | Whether to deploy an Ingress | `false` |
+| `ingress.host` | Host name to use for the Ingress (required for Ingress) | `nil` |
+| `ingress.maxBodySize` | Maximum request body size allowed (Set to 0 for an unlimited request body size) | `50m` |
+| `ingress.additionalAnnotations` | Additional annotations for the Ingress | `nil` |
+| `ingress.tls.enabled` | Whether to enable TLS termination in the Ingress | `false` |
+| `ingress.tls.secret.create` | Whether to create a TLS-secret | `true` |
+| `ingress.tls.secret.name` | Name of an external secret that will be used as a TLS-secret | `nil` |
+| `ingress.tls.cert` | Public SSL server certificate | `-----BEGIN CERTIFICATE-----` |
+| `ingress.tls.key` | Private SSL server certificate | `-----BEGIN RSA PRIVATE KEY-----` |
+
+***note
+For graceful shutdown to work with an ingress, the ingress controller has to be
+configured to gracefully close the connections as well.
+***
+
+### Promtail Sidecar
+
+To collect Gerrit logs, a Promtail sidecar can be deployed into the Gerrit replica
+pods. This can for example be used together with the [gerrit-monitoring](https://gerrit-review.googlesource.com/admin/repos/gerrit-monitoring)
+project.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `promtailSidecar.enabled` | Whether to install the Promtail sidecar container | `false` |
+| `promtailSidecar.image` | The promtail container image to use | `grafana/promtail` |
+| `promtailSidecar.version` | The promtail container image version | `1.3.0` |
+| `promtailSidecar.resources` | Configure the amount of resources the container requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 128Mi` |
+| | | `limits.cpu: 200m` |
+| | | `limits.memory: 128Mi` |
+| `promtailSidecar.tls.skipverify` | Whether to skip TLS verification | `true` |
+| `promtailSidecar.tls.caCert` | CA certificate for TLS verification | `-----BEGIN CERTIFICATE-----` |
+| `promtailSidecar.loki.url` | URL to reach Loki | `loki.example.com` |
+| `promtailSidecar.loki.user` | Loki user | `admin` |
+| `promtailSidecar.loki.password` | Loki password | `secret` |
+
+
+### Apache-Git-HTTP-Backend (Git-Backend)
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitBackend.image` | Image name of the Apache-git-http-backend container image | `k8sgerrit/apache-git-http-backend` |
+| `gitBackend.additionalPodLabels` | Additional labels for Pods | `{}` |
+| `gitBackend.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gitBackend.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains. For more information, please refer to the following documents. [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints) | {} |
+| `gitBackend.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitBackend.affinity` | Assigns a Pod to the specified Nodes | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight: 100 |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: "topology.kubernetes.io/zone" |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key: app |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator: In |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0]: git-backend |
+| `gitBackend.replicas` | Number of pod replicas to deploy | `1` |
+| `gitBackend.maxSurge` | Max. percentage or number of pods allowed to be scheduled above the desired number | `25%` |
+| `gitBackend.maxUnavailable` | Max. percentage or number of pods allowed to be unavailable at a time | `100%` |
+| `gitBackend.networkPolicy.ingress` | Custom ingress-network policy for git-backend pods | `[{}]` (allow all) |
+| `gitBackend.networkPolicy.egress` | Custom egress-network policy for git-backend pods | `nil` |
+| `gitBackend.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `gitBackend.livenessProbe` | Configuration of the liveness probe timings | `{initialDelaySeconds: 10, periodSeconds: 5}` |
+| `gitBackend.readinessProbe` | Configuration of the readiness probe timings | `{initialDelaySeconds: 5, periodSeconds: 1}` |
+| `gitBackend.credentials.htpasswd` | `.htpasswd`-file containing username/password-credentials for accessing git | `git:$apr1$O/LbLKC7$Q60GWE7OcqSEMSfe/K8xU.` (user: git, password: secret) |
+| `gitBackend.service.additionalAnnotations` | Additional annotations for the Service | `{}` |
+| `gitBackend.service.loadBalancerSourceRanges` | The list of allowed IPs for the Service | `[]` |
+| `gitBackend.service.type` | Which kind of Service to deploy | `LoadBalancer` |
+| `gitBackend.service.externalTrafficPolicy` | Specify how traffic from external is handled | `Cluster` |
+| `gitBackend.service.http.enabled` | Whether to serve HTTP-requests (needed for Ingress) | `true` |
+| `gitBackend.service.http.port` | Port over which to expose HTTP | `80` |
+| `gitBackend.service.https.enabled` | Whether to serve HTTPS-requests | `false` |
+| `gitBackend.service.https.port` | Port over which to expose HTTPS | `443` |
+
+***note
+At least one endpoint (HTTP and/or HTTPS) has to be enabled in the service!
+***
+
+Project creation, project deletion and HEAD update can also be replicated. To enable
+this feature configure the replication plugin to use an adminUrl using the format
+`gerrit+https://<apache-git-http-backend host>`.
+
+### Git garbage collection
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitGC.image` | Image name of the Git-GC container image | `k8sgerrit/git-gc` |
+| `gitGC.schedule` | Cron-formatted schedule with which to run Git garbage collection | `0 6,18 * * *` |
+| `gitGC.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `gitGC.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gitGC.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.affinity` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.additionalPodLabels` | Additional labels for Pods | `{}` |
+
+### Gerrit replica
+
+***note
+The way the Jetty servlet used by Gerrit works, the Gerrit replica component of the
+gerrit-replica chart actually requires the URL to be known, when the chart is installed.
+The suggested way to do that is to use the provided Ingress resource. This requires
+that a URL is available and that the DNS is configured to point the URL to the
+IP of the node the Ingress controller is running on!
+***
+
+***note
+Setting the canonical web URL in the gerrit.config to the host used for the Ingress
+is mandatory, if access to the Gerrit replica is required!
+***
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gerritReplica.images.gerritInit` | Image name of the Gerrit init container image | `k8sgerrit/gerrit-init` |
+| `gerritReplica.images.gerritReplica` | Image name of the Gerrit replica container image | `k8sgerrit/gerrit` |
+| `gerritReplica.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gerritReplica.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains. For more information, please refer to the following documents. [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints) | {} |
+| `gerritReplica.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gerritReplica.affinity` | Assigns a Pod to the specified Nodes. By default, gerrit-replica is evenly distributed on `topology.kubernetes.io/zone`. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight: 100 |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: "topology.kubernetes.io/zone" |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key: app |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator: In |
+| | | podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0]: gerrit-replica |
+| `gerritReplica.replicas` | Number of pod replicas to deploy | `1` |
+| `gerritReplica.additionalAnnotations` | Additional annotations for the Pods | {} |
+| `gerritReplica.additionalPodLabels` | Additional labels for the Pods | `{}` |
+| `gerritReplica.maxSurge` | Max. percentage or number of pods allowed to be scheduled above the desired number | `25%` |
+| `gerritReplica.maxUnavailable` | Max. percentage or number of pods allowed to be unavailable at a time | `100%` |
+| `gerritReplica.livenessProbe` | Configuration of the liveness probe timings | `{initialDelaySeconds: 60, periodSeconds: 5}` |
+| `gerritReplica.readinessProbe` | Configuration of the readiness probe timings | `{initialDelaySeconds: 10, periodSeconds: 10}` |
+| `gerritReplica.startupProbe` | Configuration of the startup probe timings | `{initialDelaySeconds: 10, periodSeconds: 5}` |
+| `gerritReplica.gracefulStopTimeout` | Time in seconds Kubernetes will wait until killing the pod during termination (has to be longer than Gerrit's httpd.gracefulStopTimeout to allow graceful shutdown of Gerrit) | `90` |
+| `gerritReplica.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 1` |
+| | | `requests.memory: 5Gi` |
+| | | `limits.cpu: 1` |
+| | | `limits.memory: 6Gi` |
+| `gerritReplica.networkPolicy.ingress` | Custom ingress-network policy for gerrit-replica pods | `nil` |
+| `gerritReplica.networkPolicy.egress` | Custom egress-network policy for gerrit-replica pods | `nil` |
+| `gerritReplica.service.additionalAnnotations` | Additional annotations for the Service | `{}` |
+| `gerritReplica.service.loadBalancerSourceRanges` | The list of allowed IPs for the Service | `[]` |
+| `gerritReplica.service.type` | Which kind of Service to deploy | `NodePort` |
+| `gerritReplica.service.externalTrafficPolicy` | Specify how traffic from external is handled | `Cluster` |
+| `gerritReplica.service.http.port` | Port over which to expose HTTP | `80` |
+| `gerritReplica.service.ssh.enabled` | Whether to enable SSH for the Gerrit replica | `false` |
+| `gerritReplica.service.ssh.port` | Port for SSH | `29418` |
+| `gerritReplica.keystore` | base64-encoded Java keystore (`cat keystore.jks \| base64`) to be used by Gerrit, when using SSL | `nil` |
+| `gerritReplica.pluginManagement.plugins` | List of Gerrit plugins to install | `[]` |
+| `gerritReplica.pluginManagement.plugins[0].name` | Name of plugin | `nil` |
+| `gerritReplica.pluginManagement.plugins[0].url` | Download url of plugin. If given the plugin will be downloaded, otherwise it will be installed from the gerrit.war-file. | `nil` |
+| `gerritReplica.pluginManagement.plugins[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version (optional) | `nil` |
+| `gerritReplica.pluginManagement.plugins[0].installAsLibrary` | Whether the plugin should be symlinked to the lib-dir in the Gerrit site. | `nil` |
+| `gerritReplica.pluginManagement.libs` | List of Gerrit library modules to install | `[]` |
+| `gerritReplica.pluginManagement.libs[0].name` | Name of the lib module | `nil` |
+| `gerritReplica.pluginManagement.libs[0].url` | Download url of lib module. | `nil` |
+| `gerritReplica.pluginManagement.libs[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version | `nil` |
+| `gerritReplica.pluginManagement.cache.enabled` | Whether to cache downloaded plugins | `false` |
+| `gerritReplica.pluginManagement.cache.size` | Size of the volume used to store cached plugins | `1Gi` |
+| `gerritReplica.priorityClassName` | Name of the PriorityClass to apply to replica pods | `nil` |
+| `gerritReplica.etc.config` | Map of config files (e.g. `gerrit.config`) that will be mounted to `$GERRIT_SITE/etc` by a ConfigMap | `{gerrit.config: ..., replication.config: ...}`[see here](#Gerrit-config-files) |
+| `gerritReplica.etc.secret` | Map of config files (e.g. `secure.config`) that will be mounted to `$GERRIT_SITE/etc` by a Secret | `{secure.config: ...}` [see here](#Gerrit-config-files) |
+| `gerritReplica.additionalConfigMaps` | Allows to mount additional ConfigMaps into a subdirectory of `$SITE/data` | `[]` |
+| `gerritReplica.additionalConfigMaps[*].name` | Name of the ConfigMap | `nil` |
+| `gerritReplica.additionalConfigMaps[*].subDir` | Subdirectory under `$SITE/data` into which the files should be symlinked | `nil` |
+| `gerritReplica.additionalConfigMaps[*].data` | Data of the ConfigMap. If not set, ConfigMap has to be created manually | `nil` |
+
+### Gerrit config files
+
+The gerrit-replica chart provides a ConfigMap containing the configuration files
+used by Gerrit, e.g. `gerrit.config` and a Secret containing sensitive configuration
+like the `secure.config` to configure the Gerrit installation in the Gerrit
+component. The content of the config files can be set in the `values.yaml` under
+the keys `gerritReplica.etc.config` and `gerritReplica.etc.secret` respectively.
+The key has to be the filename (e.g. `gerrit.config`) and the file's contents
+the value. This way an arbitrary number of configuration files can be loaded into
+the `$GERRIT_SITE/etc`-directory, e.g. for plugins.
+All configuration options for Gerrit are described in detail in the
+[official documentation of Gerrit](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html).
+Some options however have to be set in a specified way for Gerrit to work as
+intended with the chart:
+
+- `gerrit.basePath`
+
+ Path to the directory containing the repositories. The chart mounts this
+ directory from a persistent volume to `/var/gerrit/git` in the container. For
+ Gerrit to find the correct directory, this has to be set to `git`.
+
+- `gerrit.serverId`
+
+  In Gerrit versions higher than 2.14, Gerrit needs a server ID, which is used by
+  NoteDB. Gerrit would usually generate a random ID on startup, but since the
+  gerrit.config file is read-only when mounted as a ConfigMap, this fails.
+  Thus the server ID has to be set manually!
+
+- `gerrit.canonicalWebUrl`
+
+ The canonical web URL has to be set to the Ingress host.
+
+- `httpd.listenURL`
+
+ This has to be set to `proxy-http://*:8080/` or `proxy-https://*:8080`,
+  depending on whether TLS is enabled in the Ingress or not, otherwise the Jetty
+ servlet will run into an endless redirect loop.
+
+- `httpd.gracefulStopTimeout` / `sshd.gracefulStopTimeout`
+
+ To enable graceful shutdown of the embedded jetty server and SSHD, a timeout
+ has to be set with this option. This will be the maximum time, Gerrit will wait
+ for HTTP requests to finish before shutdown.
+
+- `container.user`
+
+ The technical user in the Gerrit replica container is called `gerrit`. Thus, this
+ value is required to be `gerrit`.
+
+- `container.replica`
+
+ Since this chart is meant to install a Gerrit replica, this naturally has to be
+ `true`.
+
+- `container.javaHome`
+
+ This has to be set to `/usr/lib/jvm/java-11-openjdk-amd64`, since this is
+ the path of the Java installation in the container.
+
+- `container.javaOptions`
+
+ The maximum heap size has to be set. And its value has to be lower than the
+ memory resource limit set for the container (e.g. `-Xmx4g`). In your calculation
+ allow memory for other components running in the container.
+
+To enable liveness- and readiness probes, the healthcheck plugin will be installed
+by default. Note, that by configuring to use a packaged or downloaded version of
+the healthcheck plugin, the configured version will take precedence over the default
+version. The plugin is by default configured to disable the `querychanges` and
+`auth` healthchecks, since the Gerrit replica does not index changes and a new
+Gerrit server will not yet necessarily have a user to validate authentication.
+
+The default configuration can be overwritten by adding the `healthcheck.config`
+file as a key-value pair to `gerritReplica.etc.config` as for every other configuration.
+
+SSH keys should be configured via the helm-chart using the `gerritReplica.etc.secret`
+map. Gerrit will create its own keys, if none are present in the site, but if
+multiple Gerrit pods are running, each Gerrit instance would have its own keys.
+Users accessing Gerrit via a load balancer would get issues due to changing
+host keys.
+
+## Upgrading the Chart
+
+To upgrade an existing installation of the gerrit-replica chart, e.g. to install
+a newer chart version or to use an updated custom `values.yaml`-file, execute
+the following command:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm upgrade \
+ <release-name> \
+ ./gerrit-replica \ # path to chart
+ -f <path-to-custom-values>.yaml \
+```
+
+## Uninstalling the Chart
+
+To delete the chart from the cluster, use:
+
+```sh
+helm delete <release-name>
+```
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/docs/nfs-provisioner.md b/charts/k8s-gerrit/helm-charts/gerrit-replica/docs/nfs-provisioner.md
new file mode 100644
index 0000000..e2d0806
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/docs/nfs-provisioner.md
@@ -0,0 +1,64 @@
+# Installing a NFS-provisioner
+
+The Gerrit replica requires access to a persistent volume capable of running in
+`Read Write Many (RWM)`-mode to store the git repositories, since the repositories
+have to be accessed by multiple pods. One possibility to provide such volumes
+is to install a provisioner for NFS-volumes into the same Kubernetes-cluster.
+This document will guide through the process.
+
+The [Kubernetes external-storage project](https://github.com/kubernetes-incubator/external-storage)
+provides an out-of-tree dynamic [provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs)
+for NFS volumes. A chart exists for easy deployment of the project onto a
+Kubernetes cluster. The chart's sources can be found [here](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner).
+
+## Prerequisites
+
+This guide will use Helm to install the NFS-provisioner. Thus, Helm has to be
+installed.
+
+## Installing the nfs-server-provisioner chart
+
+A custom `values.yaml`-file containing a configuration tested with the
+gerrit-replica chart can be found in the `supplements/nfs`-directory in the
+gerrit-replica chart's root directory. In addition a file stating the tested
+version of the nfs-server-provisioner chart is present in the same directory.
+
+If needed, adapt the `values.yaml`-file for the nfs-server-provisioner chart
+further and then run:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts/gerrit-replica/supplements/nfs
+helm install nfs \
+ stable/nfs-server-provisioner \
+ -f values.yaml \
+ --version $(cat VERSION)
+```
+
+For a description of the configuration options, refer to the
+[chart's documentation](https://github.com/helm/charts/blob/master/stable/nfs-server-provisioner/README.md).
+
+Here are some tips for configuring the nfs-server-provisioner chart to work with
+the gerrit-replica chart:
+
+- Deploying more than 1 `replica` led to some reliability issues in tests and
+ should be further tested for now, if required.
+- The name of the StorageClass created for NFS-volumes has to be the same as the
+ one defined in the gerrit-replica chart for `storageClasses.shared.name`
+- The StorageClass for NFS-volumes needs to have the parameter `mountOptions: vers=4.1`,
+ due to compatibility [issues](https://github.com/kubernetes-incubator/external-storage/issues/223)
+ with Ganesha.
+
+## Deleting the nfs-server-provisioner chart
+
+***note
+**Attention:** Never delete the nfs-server-provisioner chart, if there is still a
+PersistentVolumeClaim and Pods using a NFS-volume provisioned by the NFS server
+provisioner. This will lead to crashed pods, that will not be terminated correctly.
+***
+
+If no Pod or PVC is using a NFS-volume provisioned by the NFS server provisioner
+anymore, delete it like any other chart:
+
+```sh
+helm delete nfs
+```
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/supplements/nfs/VERSION b/charts/k8s-gerrit/helm-charts/gerrit-replica/supplements/nfs/VERSION
new file mode 100644
index 0000000..7dff5b8
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/supplements/nfs/VERSION
@@ -0,0 +1 @@
+0.2.1
\ No newline at end of file
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/supplements/nfs/values.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/supplements/nfs/values.yaml
new file mode 100644
index 0000000..aa3d9ce
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/supplements/nfs/values.yaml
@@ -0,0 +1,42 @@
+# Deploying more than 1 `replica` led to some reliability issues in tests and
+# should be further tested for now, if required.
+replicaCount: 1
+
+image:
+ repository: quay.io/kubernetes_incubator/nfs-provisioner
+ tag: v1.0.9
+ pullPolicy: IfNotPresent
+
+service:
+ type: ClusterIP
+ nfsPort: 2049
+ mountdPort: 20048
+ rpcbindPort: 51413
+
+persistence:
+ enabled: true
+ storageClass: default
+ accessMode: ReadWriteOnce
+ size: 7.5Gi
+
+storageClass:
+ create: true
+ defaultClass: false
+ # The name of the StorageClass has to be the same as the one defined in the
+ # gerrit-replica chart for `storageClasses.shared.name`
+ name: shared-storage
+ parameters:
+ # Required!
+ mountOptions: vers=4.1
+ reclaimPolicy: Delete
+
+rbac:
+ create: true
+
+resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/NOTES.txt b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/NOTES.txt
new file mode 100644
index 0000000..30e263f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/NOTES.txt
@@ -0,0 +1,35 @@
+A Gerrit replica has been deployed.
+=================================
+
+The Apache-Git-HTTP-Backend is now ready to receive replication requests from the
+primary Gerrit. Please configure the replication plugin of the primary Gerrit to
+push the repositories to:
+
+{{ if .Values.istio.enabled -}}
+ http {{- if .Values.istio.tls.enabled -}} s {{- end -}} :// {{- .Values.istio.host -}} /${name}.git
+{{ else if .Values.ingress.enabled -}}
+ http {{- if .Values.ingress.tls.enabled -}} s {{- end -}} :// {{- .Values.ingress.host -}} /${name}.git
+{{- else }}
+ http://<EXTERNAL-IP>: {{- .Values.gitBackend.service.http.port -}} /${name}.git
+ The external IP of the service can be found by running:
+ kubectl get svc git-backend-service
+{{- end }}
+
+Project creation, project deletion and HEAD update can also be replicated. To enable
+this feature configure the replication plugin to use an adminUrl using the format
+`gerrit+http {{- if .Values.ingress.tls.enabled -}} s {{- end -}} :// {{- .Values.ingress.host -}}`.
+
+A detailed guide of how to configure Gerrit's replication plugin can be found here:
+
+https://gerrit.googlesource.com/plugins/replication/+doc/master/src/main/resources/Documentation/config.md
+
+The Gerrit replica is starting up.
+
+The initialization process may take some time. Afterwards the git repositories
+will be available under:
+
+{{ if .Values.istio.enabled -}}
+ http {{- if .Values.istio.tls.enabled -}} s {{- end -}} :// {{- .Values.istio.host -}} /<repository-name>.git
+{{- else }}
+ http {{- if .Values.ingress.tls.enabled -}} s {{- end -}} :// {{- .Values.ingress.host -}} /<repository-name>.git
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/_helpers.tpl b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/_helpers.tpl
new file mode 100644
index 0000000..500d58c
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/_helpers.tpl
@@ -0,0 +1,20 @@
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "gerrit-replica.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create secret to access docker registry
+*/}}
+{{- define "imagePullSecret" }}
+{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.images.registry.name (printf "%s:%s" .Values.images.registry.ImagePullSecret.username .Values.images.registry.ImagePullSecret.password | b64enc) | b64enc }}
+{{- end }}
+
+{{/*
+Add '/' to registry if needed.
+*/}}
+{{- define "registry" -}}
+{{ if .Values.images.registry.name }}{{- printf "%s/" .Values.images.registry.name -}}{{end}}
+{{- end -}}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.configmap.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.configmap.yaml
new file mode 100644
index 0000000..1aa9496
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.configmap.yaml
@@ -0,0 +1,78 @@
+{{- $root := . -}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{- range $key, $value := .Values.gerritReplica.etc.config }}
+ {{ $key }}:
+{{ toYaml $value | indent 4 }}
+ {{- end }}
+ {{- if not (hasKey .Values.gerritReplica.etc.config "healthcheck.config") }}
+ healthcheck.config: |-
+ [healthcheck "auth"]
+ # On new instances there may be no users to use for healthchecks
+ enabled = false
+ [healthcheck "querychanges"]
+ # On new instances there won't be any changes to query
+ enabled = false
+ {{- end }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ gerrit-init.yaml: |-
+ {{ if .Values.caCert -}}
+ caCertPath: /var/config/ca.crt
+ {{- end }}
+ pluginCacheEnabled: {{ .Values.gerritReplica.pluginManagement.cache.enabled }}
+ pluginCacheDir: /var/mnt/plugins
+ {{- if .Values.gerritReplica.pluginManagement.plugins }}
+ plugins:
+{{ toYaml .Values.gerritReplica.pluginManagement.plugins | indent 6}}
+ {{- end }}
+ {{- if .Values.gerritReplica.pluginManagement.libs }}
+ libs:
+{{ toYaml .Values.gerritReplica.pluginManagement.libs | indent 6}}
+ {{- end }}
+{{- range .Values.gerritReplica.additionalConfigMaps -}}
+{{- if .data }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $root.Release.Name }}-{{ .name }}
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ $root.Release.Name }}
+ chart: {{ template "gerrit-replica.chart" $root }}
+ heritage: {{ $root.Release.Service }}
+ release: {{ $root.Release.Name }}
+ {{- if $root.Values.additionalLabels }}
+{{ toYaml $root.Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+{{ toYaml .data | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.secrets.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.secrets.yaml
new file mode 100644
index 0000000..ece9b9a
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.secrets.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-secure-config
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{ if .Values.gerritReplica.keystore -}}
+ keystore: {{ .Values.gerritReplica.keystore }}
+ {{- end }}
+ {{- range $key, $value := .Values.gerritReplica.etc.secret }}
+ {{ $key }}: {{ $value | b64enc }}
+ {{- end }}
+type: Opaque
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.service.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.service.yaml
new file mode 100644
index 0000000..01030b4
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.service.yaml
@@ -0,0 +1,40 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-service
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.gerritReplica.service.additionalAnnotations }}
+ annotations:
+{{ toYaml .Values.gerritReplica.service.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ with .Values.gerritReplica.service }}
+ {{- if .loadBalancerSourceRanges -}}
+ loadBalancerSourceRanges:
+{{- range .loadBalancerSourceRanges }}
+ - {{ . | quote }}
+{{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .http.port }}
+ targetPort: 8080
+ {{ if .ssh.enabled -}}
+ - name: ssh
+ port: {{ .ssh.port }}
+ targetPort: 29418
+ {{- end }}
+ type: {{ .type }}
+ externalTrafficPolicy: {{ .externalTrafficPolicy }}
+ {{- end }}
+ selector:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.stateful-set.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.stateful-set.yaml
new file mode 100644
index 0000000..d4d74a9
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.stateful-set.yaml
@@ -0,0 +1,337 @@
+{{- $root := . -}}
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ .Release.Name }}-gerrit-replica-statefulset
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ serviceName: {{ .Release.Name }}-gerrit-replica-service
+ replicas: {{ .Values.gerritReplica.replicas }}
+ updateStrategy:
+ rollingUpdate:
+ partition: {{ .Values.gerritReplica.updatePartition }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ {{- if .Values.gerritReplica.additionalPodLabels }}
+{{ toYaml .Values.gerritReplica.additionalPodLabels | indent 8 }}
+ {{- end }}
+ annotations:
+ chartRevision: "{{ .Release.Revision }}"
+ {{- if .Values.gerritReplica.additionalAnnotations }}
+{{ toYaml .Values.gerritReplica.additionalAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.gerritReplica.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerritReplica.priorityClassName }}
+ priorityClassName: {{ . }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ .Values.gerritReplica.gracefulStopTimeout }}
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ - name: gerrit-init
+ image: {{ template "registry" . }}{{ .Values.gerritReplica.images.gerritInit }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: gerrit-init-config
+ mountPath: "/var/config/gerrit-init.yaml"
+ subPath: gerrit-init.yaml
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- if .Values.gerritReplica.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ mountPath: "/var/mnt/plugins"
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-replica-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ subPath: ca.crt
+ mountPath: "/var/config/ca.crt"
+ {{- end }}
+ {{- range .Values.gerritReplica.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ containers:
+ - name: gerrit-replica
+ image: {{ template "registry" . }}{{ .Values.gerritReplica.images.gerritReplica }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/ash"
+ - "-c"
+ - "kill -2 $(pidof java) && tail --pid=$(pidof java) -f /dev/null"
+ ports:
+ - name: http
+ containerPort: 8080
+ {{ if .Values.gerritReplica.service.ssh -}}
+ - name: ssh
+ containerPort: 29418
+ {{- end }}
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-replica-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{- range .Values.gerritReplica.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: http
+{{ toYaml .Values.gerritReplica.livenessProbe | indent 10 }}
+ readinessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: http
+{{ toYaml .Values.gerritReplica.readinessProbe | indent 10 }}
+ startupProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: http
+{{ toYaml .Values.gerritReplica.startupProbe | indent 10 }}
+ resources:
+{{ toYaml .Values.gerritReplica.resources | indent 10 }}
+ {{ if .Values.istio.enabled -}}
+ - name: istio-proxy
+ image: auto
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l | xargs) -ne 0 ]; do sleep 1; done"
+ {{- end }}
+ {{ if .Values.promtailSidecar.enabled -}}
+ - name: promtail
+ image: {{ .Values.promtailSidecar.image }}:v{{ .Values.promtailSidecar.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ command:
+ - sh
+ - -ec
+ args:
+ - |-
+ /usr/bin/promtail \
+ -config.file=/etc/promtail/promtail.yaml \
+ -client.url={{ .Values.promtailSidecar.loki.url }}/loki/api/v1/push \
+ -client.external-labels=instance=$HOSTNAME
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ resources:
+{{ toYaml .Values.promtailSidecar.resources | indent 10 }}
+ volumeMounts:
+ - name: promtail-config
+ mountPath: /etc/promtail/promtail.yaml
+ subPath: promtail.yaml
+ - name: promtail-secret
+ mountPath: /etc/promtail/promtail.secret
+ subPath: promtail.secret
+ {{- if not .Values.promtailSidecar.tls.skipVerify }}
+ - name: tls-ca
+ mountPath: /etc/promtail/promtail.ca.crt
+ subPath: ca.crt
+ {{- end }}
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/gerrit/logs"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ volumes:
+ {{ if not .Values.gerritReplica.persistence.enabled -}}
+ - name: gerrit-site
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.gerritReplica.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-plugin-cache-pvc
+ {{- end }}
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ - name: gerrit-init-config
+ configMap:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ - name: gerrit-config
+ configMap:
+ name: {{ .Release.Name }}-gerrit-replica-configmap
+ - name: gerrit-replica-secure-config
+ secret:
+ secretName: {{ .Release.Name }}-gerrit-replica-secure-config
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ secret:
+ secretName: {{ .Release.Name }}-tls-ca
+ {{- end }}
+ {{- range .Values.gerritReplica.additionalConfigMaps }}
+ - name: {{ .name }}
+ configMap:
+ name: {{ if .data }}{{ $root.Release.Name }}-{{ .name }}{{ else }}{{ .name }}{{ end }}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
+ {{ if .Values.promtailSidecar.enabled -}}
+ - name: promtail-config
+ configMap:
+ name: {{ .Release.Name }}-promtail-gerrit-configmap
+ - name: promtail-secret
+ secret:
+ secretName: {{ .Release.Name }}-promtail-secret
+ {{- end }}
+ {{ if .Values.gerritReplica.persistence.enabled -}}
+ volumeClaimTemplates:
+ - metadata:
+ name: gerrit-site
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.gerritReplica.persistence.size }}
+ storageClassName: {{ .Values.storageClasses.default.name }}
+ {{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.storage.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.storage.yaml
new file mode 100644
index 0000000..c710737
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/gerrit-replica.storage.yaml
@@ -0,0 +1,22 @@
+ {{- if .Values.gerritReplica.pluginManagement.cache.enabled }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-plugin-cache-pvc
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gerritReplica.pluginManagement.cache.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.deployment.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.deployment.yaml
new file mode 100644
index 0000000..037bcb9
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.deployment.yaml
@@ -0,0 +1,168 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Release.Name }}-git-backend-deployment
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.gitBackend.replicas }}
+ strategy:
+ rollingUpdate:
+ maxSurge: {{ .Values.gitBackend.maxSurge }}
+ maxUnavailable: {{ .Values.gitBackend.maxUnavailable }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ {{- if .Values.gitBackend.additionalPodLabels }}
+{{ toYaml .Values.gitBackend.additionalPodLabels | indent 8 }}
+ {{- end }}
+ annotations:
+ chartRevision: "{{ .Release.Revision }}"
+ spec:
+ {{- with .Values.gitBackend.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gitBackend.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gitBackend.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gitBackend.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: apache-git-http-backend
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ template "registry" . }}{{ .Values.gitBackend.image }}:{{ .Values.images.version }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ ports:
+ - name: http-port
+ containerPort: 80
+ resources:
+{{ toYaml .Values.gitBackend.resources | indent 10 }}
+ livenessProbe:
+ tcpSocket:
+ port: http-port
+{{ toYaml .Values.gitBackend.livenessProbe | indent 10 }}
+ readinessProbe:
+ tcpSocket:
+ port: http-port
+{{ toYaml .Values.gitBackend.readinessProbe | indent 10 }}
+ volumeMounts:
+ - name: git-repositories
+ mountPath: "/var/gerrit/git"
+ - name: logs
+ subPathExpr: "apache-git-http-backend/$(POD_NAME)"
+ mountPath: "/var/log/apache2"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ - name: git-backend-secret
+ readOnly: true
+ subPath: .htpasswd
+ mountPath: "/var/apache/credentials/.htpasswd"
+ {{ if .Values.istio.enabled -}}
+ - name: istio-proxy
+ image: auto
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l | xargs) -ne 0 ]; do sleep 1; done"
+ {{- end }}
+ volumes:
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: git-backend-secret
+ secret:
+ secretName: {{ .Release.Name }}-git-backend-secret
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.secrets.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.secrets.yaml
new file mode 100644
index 0000000..94b1705
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.secrets.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-git-backend-secret
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ .htpasswd: {{ required "A .htpasswd-file is required for the git backend." .Values.gitBackend.credentials.htpasswd | b64enc }}
+type: Opaque
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.service.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.service.yaml
new file mode 100644
index 0000000..7bd47ef
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-backend.service.yaml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Release.Name }}-git-backend-service
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.gitBackend.service.additionalAnnotations }}
+ annotations:
+{{ toYaml .Values.gitBackend.service.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ with .Values.gitBackend.service }}
+ {{- if .loadBalancerSourceRanges -}}
+ loadBalancerSourceRanges:
+{{- range .loadBalancerSourceRanges }}
+ - {{ . | quote }}
+{{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .http.port }}
+ targetPort: 80
+ type: {{ .type }}
+ externalTrafficPolicy: {{ .externalTrafficPolicy }}
+ {{- end }}
+ selector:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-gc.cronjob.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-gc.cronjob.yaml
new file mode 100644
index 0000000..028ffe9
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/git-gc.cronjob.yaml
@@ -0,0 +1,134 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-git-gc
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.gitGC.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ annotations:
+ cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
+ {{ if .Values.istio.enabled }}
+ sidecar.istio.io/inject: "false"
+ {{- end }}
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.gitGC.additionalPodLabels }}
+{{ toYaml .Values.gitGC.additionalPodLabels | indent 12 }}
+ {{- end }}
+ spec:
+ {{- with .Values.gitGC.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.gitGC.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.gitGC.affinity }}
+ affinity:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ restartPolicy: OnFailure
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: git-gc
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ template "registry" . }}{{ .Values.gitGC.image }}:{{ .Values.images.version }}
+ resources:
+{{ toYaml .Values.gitGC.resources | indent 14 }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: git-repositories
+ mountPath: "/var/gerrit/git"
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/log/git"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ volumes:
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/global.secrets.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/global.secrets.yaml
new file mode 100644
index 0000000..7dfe4a1
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/global.secrets.yaml
@@ -0,0 +1,18 @@
+{{ if .Values.caCert -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-tls-ca
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ ca.crt: {{ .Values.caCert | b64enc }}
+type: Opaque
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/image-pull.secret.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/image-pull.secret.yaml
new file mode 100644
index 0000000..3f97cd0
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/image-pull.secret.yaml
@@ -0,0 +1,13 @@
+{{ if and .Values.images.registry.ImagePullSecret.name .Values.images.registry.ImagePullSecret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.images.registry.ImagePullSecret.name }}
+ labels:
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ template "imagePullSecret" . }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/ingress.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/ingress.yaml
new file mode 100644
index 0000000..e78dfcc
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/ingress.yaml
@@ -0,0 +1,86 @@
+{{ if and .Values.ingress.enabled (not .Values.istio.enabled) -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ .Release.Name }}-ingress
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/proxy-body-size: {{ .Values.ingress.maxBodySize | default "50m" }}
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ nginx.ingress.kubernetes.io/configuration-snippet: |-
+ if ($args ~ service=git-receive-pack){
+ set $proxy_upstream_name "{{ .Release.Namespace }}-{{ .Release.Name }}-git-backend-service-http";
+ set $proxy_host $proxy_upstream_name;
+ set $service_name "{{ .Release.Name }}-git-backend-service";
+ }
+ {{- if .Values.ingress.additionalAnnotations }}
+{{ toYaml .Values.ingress.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ if .Values.ingress.tls.enabled -}}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.host }}
+ {{ if .Values.ingress.tls.secret.create -}}
+ secretName: {{ .Release.Name }}-tls-secret
+ {{- else }}
+ secretName: {{ .Values.ingress.tls.secret.name }}
+ {{- end }}
+ {{- end }}
+ rules:
+ - host: {{ required "A host URL is required for the ingress. Please set 'ingress.host'" .Values.ingress.host }}
+ http:
+ paths:
+ - pathType: Prefix
+ path: /a/projects
+ backend:
+ service:
+ name: {{ .Release.Name }}-git-backend-service
+ port:
+ number: {{ .Values.gitBackend.service.http.port }}
+ - pathType: Prefix
+ path: "/.*/git-receive-pack"
+ backend:
+ service:
+ name: {{ .Release.Name }}-git-backend-service
+ port:
+ number: {{ .Values.gitBackend.service.http.port }}
+ - pathType: Prefix
+ path: /
+ backend:
+ service:
+ name: {{ .Release.Name }}-gerrit-replica-service
+ port:
+ number: {{ .Values.gerritReplica.service.http.port }}
+{{- end }}
+---
+{{ if and (and .Values.ingress.tls.enabled .Values.ingress.tls.secret.create) (not .Values.istio.enabled) -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-tls-secret
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ {{ with .Values.ingress.tls -}}
+ tls.crt: {{ .cert | b64enc }}
+ tls.key: {{ .key | b64enc }}
+ {{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/istio.ingressgateway.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/istio.ingressgateway.yaml
new file mode 100644
index 0000000..3cb30c6
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/istio.ingressgateway.yaml
@@ -0,0 +1,144 @@
+{{ if .Values.istio.enabled -}}
+{{ if and .Values.istio.tls.enabled .Values.istio.tls.secret.create }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-istio-tls-secret
+ namespace: istio-system
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ {{ with .Values.istio.tls -}}
+ tls.crt: {{ .cert | b64enc }}
+ tls.key: {{ .key | b64enc }}
+ {{- end }}
+{{- end }}
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+ name: {{ .Release.Name }}-istio-gateway
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - {{ .Values.istio.host }}
+ {{ if .Values.istio.tls.enabled }}
+ tls:
+ httpsRedirect: true
+ - port:
+ number: 443
+ name: https
+ protocol: HTTPS
+ hosts:
+ - {{ .Values.istio.host }}
+ tls:
+ mode: SIMPLE
+ {{ if .Values.istio.tls.secret.create }}
+ credentialName: {{ .Release.Name }}-istio-tls-secret
+ {{- else }}
+ credentialName: {{ .Values.istio.tls.secret.name }}
+ {{- end }}
+ {{- end }}
+ {{ if .Values.istio.ssh.enabled }}
+ - port:
+ number: 29418
+ name: ssh
+ protocol: TCP
+ hosts:
+ - {{ .Values.istio.host }}
+ {{- end }}
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: {{ .Release.Name }}-istio-virtual-service
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ hosts:
+ - {{ .Values.istio.host }}
+ gateways:
+ - {{ .Release.Name }}-istio-gateway
+ http:
+ - name: apache-git-http-backend
+ match:
+ - uri:
+ prefix: "/a/projects/"
+ - uri:
+ regex: "^/(.*)/git-receive-pack$"
+ - uri:
+ regex: "^/(.*)/info/refs$"
+ queryParams:
+ service:
+ exact: git-receive-pack
+ route:
+ - destination:
+ host: {{ .Release.Name }}-git-backend-service.{{ .Release.Namespace }}.svc.cluster.local
+ port:
+ number: 80
+ - name: gerrit-replica
+ route:
+ - destination:
+ host: {{ .Release.Name }}-gerrit-replica-service.{{ .Release.Namespace }}.svc.cluster.local
+ port:
+ number: 80
+ {{ if .Values.istio.ssh.enabled }}
+ tcp:
+ - match:
+ - port: {{ .Values.gerritReplica.service.ssh.port }}
+ route:
+ - destination:
+ host: {{ .Release.Name }}-gerrit-replica-service.{{ .Release.Namespace }}.svc.cluster.local
+ port:
+ number: {{ .Values.gerritReplica.service.ssh.port }}
+ {{- end }}
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: DestinationRule
+metadata:
+ name: {{ .Release.Name }}-gerrit-destination-rule
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ host: {{ .Release.Name }}-gerrit-replica-service.{{ .Release.Namespace }}.svc.cluster.local
+ trafficPolicy:
+ loadBalancer:
+ simple: LEAST_CONN
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/log-cleaner.cronjob.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/log-cleaner.cronjob.yaml
new file mode 100644
index 0000000..cbeb88f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/log-cleaner.cronjob.yaml
@@ -0,0 +1,69 @@
+{{- if and .Values.logStorage.enabled .Values.logStorage.cleanup.enabled }}
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-log-cleaner
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.logStorage.cleanup.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.logStorage.cleanup.additionalPodLabels }}
+{{ toYaml .Values.logStorage.cleanup.additionalPodLabels | indent 12 }}
+ {{- end }}
+ {{ if .Values.istio.enabled -}}
+ annotations:
+ sidecar.istio.io/inject: "false"
+ {{- end }}
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: log-cleaner
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ find /var/logs/ \
+ -mindepth 1 \
+ -type f \
+ -mtime +{{ .Values.logStorage.cleanup.retentionDays }} \
+ -print \
+ -delete
+ find /var/logs/ -type d -empty -delete
+ resources:
+{{ toYaml .Values.logStorage.cleanup.resources | indent 14 }}
+ volumeMounts:
+ - name: logs
+ mountPath: "/var/logs"
+ volumes:
+ - name: logs
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/netpol.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/netpol.yaml
new file mode 100644
index 0000000..72a2bbd
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/netpol.yaml
@@ -0,0 +1,248 @@
+{{ if .Values.networkPolicies.enabled -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: {{ .Release.Name }}-default-deny-all
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress: []
+ egress: []
+---
+{{ if .Values.networkPolicies.dnsPorts -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ .Release.Name }}-allow-dns-access
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ {{ range .Values.networkPolicies.dnsPorts -}}
+ - port: {{ . }}
+ protocol: UDP
+ - port: {{ . }}
+ protocol: TCP
+ {{ end }}
+{{- end }}
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-replica-allow-external
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+ - ports:
+ - port: 8080
+ from: []
+---
+{{ if or .Values.gitBackend.networkPolicy.ingress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: git-backend-custom-ingress-policies
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+{{ toYaml .Values.gitBackend.networkPolicy.ingress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.gitBackend.networkPolicy.egress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: git-backend-custom-egress-policies
+ labels:
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: git-backend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ egress:
+{{ toYaml .Values.gitBackend.networkPolicy.egress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.gerritReplica.networkPolicy.ingress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-replica-custom-ingress-policies
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+{{ toYaml .Values.gerritReplica.networkPolicy.ingress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.gerritReplica.networkPolicy.egress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-replica-custom-egress-policies
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ egress:
+{{ toYaml .Values.gerritReplica.networkPolicy.egress | indent 2 }}
+{{- end }}
+---
+{{ if or .Values.istio.enabled -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: istio-proxy
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ policyTypes:
+ - Egress
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ egress:
+ - ports:
+ - protocol: TCP
+ port: 15012
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 15012
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ .Release.Name }}-istio-ingress
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ release: {{ .Release.Name }}
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 80
+ {{ if .Values.istio.ssh.enabled }}
+ - protocol: TCP
+ port: {{ .Values.gerritReplica.service.ssh.port }}
+ {{- end }}
+ from:
+ - namespaceSelector:
+ matchLabels:
+ name: istio-system
+ - podSelector:
+ matchLabels:
+ istio: ingressgateway
+
+{{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/nfs.configmap.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/nfs.configmap.yaml
new file mode 100644
index 0000000..32b167b
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/nfs.configmap.yaml
@@ -0,0 +1,28 @@
+{{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-nfs-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ idmapd.conf: |-
+ [General]
+
+ Verbosity = 0
+ Pipefs-Directory = /run/rpc_pipefs
+ # set your own domain here, if it differs from FQDN minus hostname
+ Domain = {{ .Values.nfsWorkaround.idDomain }}
+
+ [Mapping]
+
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/promtail.configmap.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/promtail.configmap.yaml
new file mode 100644
index 0000000..8dac380
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/promtail.configmap.yaml
@@ -0,0 +1,94 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-promtail-gerrit-configmap
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ promtail.yaml: |-
+ positions:
+ filename: /var/gerrit/logs/promtail-positions.yaml
+
+ client:
+ tls_config:
+ insecure_skip_verify: {{ .Values.promtailSidecar.tls.skipVerify }}
+ {{- if not .Values.promtailSidecar.tls.skipVerify }}
+ ca_file: /etc/promtail/promtail.ca.crt
+ {{- end }}
+ basic_auth:
+ username: {{ .Values.promtailSidecar.loki.user }}
+ password_file: /etc/promtail/promtail.secret
+ scrape_configs:
+ - job_name: gerrit_error
+ static_configs:
+ - targets:
+ - localhost
+ labels:
+ job: gerrit_error
+ __path__: /var/gerrit/logs/error_log.json
+ entry_parser: raw
+ pipeline_stages:
+ - json:
+ expressions:
+ timestamp: '"@timestamp"'
+ message:
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "," "." 1 }}'`}}
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "Z" " +0000" 1 }}'`}}
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "T" " " 1 }}'`}}
+ - timestamp:
+ source: timestamp
+ format: "2006-01-02 15:04:05.999 -0700"
+ - regex:
+ source: message
+ expression: "Gerrit Code Review (?P<gerrit_version>.*) ready"
+ - labels:
+ gerrit_version:
+ - job_name: gerrit_httpd
+ static_configs:
+ - targets:
+ - localhost
+ labels:
+ job: gerrit_httpd
+ __path__: /var/gerrit/logs/httpd_log.json
+ entry_parser: raw
+ pipeline_stages:
+ - json:
+ expressions:
+ timestamp: null
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "," "." 1 }}'`}}
+ - timestamp:
+ format: 02/Jan/2006:15:04:05.999 -0700
+ source: timestamp
+ - job_name: gerrit_sshd
+ static_configs:
+ - targets:
+ - localhost
+ labels:
+ job: gerrit_sshd
+ __path__: /var/gerrit/logs/sshd_log.json
+ entry_parser: raw
+ pipeline_stages:
+ - json:
+ expressions:
+ timestamp:
+ - template:
+ source: timestamp
+ template: {{`'{{ Replace .Value "," "." 1 }}'`}}
+ - timestamp:
+ source: timestamp
+ format: 2006-01-02 15:04:05.999 -0700
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/promtail.secret.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/promtail.secret.yaml
new file mode 100644
index 0000000..012fb5b
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/promtail.secret.yaml
@@ -0,0 +1,18 @@
+{{ if .Values.promtailSidecar.enabled -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-promtail-secret
+ labels:
+ app.kubernetes.io/component: gerrit-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+type: Opaque
+data:
+ promtail.secret: {{ .Values.promtailSidecar.loki.password | b64enc }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/storage.pvc.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/storage.pvc.yaml
new file mode 100644
index 0000000..5f8974e
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/storage.pvc.yaml
@@ -0,0 +1,27 @@
+{{- if not .Values.gitRepositoryStorage.externalPVC.use }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-git-repositories-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gitRepositoryStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
+{{- if and .Values.logStorage.enabled (not .Values.logStorage.externalPVC.use) }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-log-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.logStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/storageclasses.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/storageclasses.yaml
new file mode 100644
index 0000000..fb91856
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/templates/storageclasses.yaml
@@ -0,0 +1,57 @@
+{{ if .Values.storageClasses.default.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.default.name }}
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.default.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.default.reclaimPolicy }}
+{{ if .Values.storageClasses.default.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.default.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+{{ if .Values.storageClasses.default.mountOptions -}}
+mountOptions:
+{{- range .Values.storageClasses.default.mountOptions }}
+ - {{ . }}
+{{- end }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.default.allowVolumeExpansion }}
+{{- end }}
+{{- end }}
+---
+{{ if .Values.storageClasses.shared.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.shared.name }}
+ labels:
+ chart: {{ template "gerrit-replica.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.shared.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.shared.reclaimPolicy }}
+{{ if .Values.storageClasses.shared.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.shared.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+{{ if .Values.storageClasses.shared.mountOptions -}}
+mountOptions:
+{{- range .Values.storageClasses.shared.mountOptions }}
+ - {{ . }}
+{{- end }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.shared.allowVolumeExpansion }}
+{{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit-replica/values.yaml b/charts/k8s-gerrit/helm-charts/gerrit-replica/values.yaml
new file mode 100644
index 0000000..3f318f8
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit-replica/values.yaml
@@ -0,0 +1,430 @@
+images:
+ busybox:
+ registry: docker.io
+ tag: latest
+ # Registry used for container images created by this project
+ registry:
+ # The registry name must NOT contain a trailing slash
+ name:
+ ImagePullSecret:
+ # Leave blank, if no ImagePullSecret is needed.
+ name: image-pull-secret
+ # If set to false, the gerrit-replica chart expects either a ImagePullSecret
+ # with the name configured above to be present on the cluster or that no
+ # credentials are needed.
+ create: false
+ username:
+ password:
+ version: latest
+ imagePullPolicy: Always
+ # Additional ImagePullSecrets that already exist and should be used by the
+ # pods of this chart. E.g. to pull busybox from dockerhub.
+ additionalImagePullSecrets: []
+
+# Additional labels that should be applied to all resources
+additionalLabels: {}
+
+storageClasses:
+ # Storage class used for storing logs and other pod-specific persisted data
+ default:
+ # If create is set to false, an existing StorageClass with the given
+ # name is expected to exist in the cluster. Setting create to true will
+ # create a storage class with the parameters given below.
+ name: default
+ create: false
+ provisioner: kubernetes.io/aws-ebs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ type: gp2
+ fsType: ext4
+ mountOptions: []
+ allowVolumeExpansion: false
+ # Storage class used for storing git repositories. Has to provide RWM access.
+ shared:
+ # If create is set to false, an existing StorageClass with RWM access
+ # mode and the given name has to be provided.
+ name: shared-storage
+ create: false
+ provisioner: nfs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ mountOptions: vers=4.1
+ mountOptions: []
+ allowVolumeExpansion: false
+
+nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idDomain: localdomain.com
+
+
+networkPolicies:
+ enabled: false
+ dnsPorts:
+ - 53
+ - 8053
+
+
+gitRepositoryStorage:
+ externalPVC:
+ use: false
+ name: git-repositories-pvc
+ size: 5Gi
+
+
+logStorage:
+ enabled: false
+ externalPVC:
+ use: false
+ name: gerrit-logs-pvc
+ size: 5Gi
+ cleanup:
+ enabled: false
+ additionalPodLabels: {}
+ schedule: "0 0 * * *"
+ retentionDays: 14
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+
+istio:
+ enabled: false
+ host:
+ tls:
+ enabled: false
+ secret:
+ # If using an external secret, make sure to name the keys `tls.crt`
+ # and `tls.key`, respectively.
+ create: true
+ # `name` will only be used, if `create` is set to false to bind an
+ # existing secret. Otherwise the name will be automatically generated to
+ # avoid conflicts between multiple chart installations.
+ name:
+      # `cert` and `key` will only be used, if the secret will be created by
+      # this chart.
+ cert: |-
+ -----BEGIN CERTIFICATE-----
+
+ -----END CERTIFICATE-----
+ key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+
+ -----END RSA PRIVATE KEY-----
+ ssh:
+ enabled: false
+
+caCert:
+
+ingress:
+ enabled: false
+ host:
+ # The maximum body size to allow for requests. Use "0" to allow unlimited
+ # reuqest body sizes.
+ maxBodySize: 50m
+ additionalAnnotations:
+ kubernetes.io/ingress.class: nginx
+ # nginx.ingress.kubernetes.io/server-alias: example.com
+ # nginx.ingress.kubernetes.io/whitelist-source-range: xxx.xxx.xxx.xxx
+ tls:
+ enabled: false
+ secret:
+ # If using an external secret, make sure to name the keys `tls.crt`
+ # and `tls.key`, respectively.
+ create: true
+ # `name` will only be used, if `create` is set to false to bind an
+ # existing secret. Otherwise the name will be automatically generated to
+ # avoid conflicts between multiple chart installations.
+ name:
+      # `cert` and `key` will only be used, if the secret will be created by
+      # this chart.
+ cert: |-
+ -----BEGIN CERTIFICATE-----
+
+ -----END CERTIFICATE-----
+ key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+
+ -----END RSA PRIVATE KEY-----
+
+promtailSidecar:
+ enabled: false
+ image: grafana/promtail
+ version: 1.3.0
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 200m
+ memory: 128Mi
+ tls:
+ skipVerify: true
+ loki:
+ url: loki.example.com
+ user: admin
+ password: secret
+
+
+gitBackend:
+ image: k8sgerrit/apache-git-http-backend
+
+ additionalPodLabels: {}
+ tolerations: []
+ topologySpreadConstraints: {}
+ nodeSelector: {}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - git-backend
+ topologyKey: "topology.kubernetes.io/zone"
+
+ replicas: 1
+ maxSurge: 25%
+ # For just one replica, 100 % unavailability has to be allowed for updates to
+ # work.
+ maxUnavailable: 100%
+
+ # The general NetworkPolicy rules implemented by this chart may be too restrictive
+ # for some setups. Here custom rules may be added to whitelist some additional
+ # connections.
+ networkPolicy:
+ # This allows ingress traffic from all sources. If possible, this should be
+ # limited to the respective primary Gerrit that replicates to this replica.
+ ingress:
+ - {}
+ egress: []
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+ livenessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 5
+
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 1
+
+ service:
+ additionalAnnotations: {}
+ loadBalancerSourceRanges: []
+ type: NodePort
+ externalTrafficPolicy: Cluster
+ http:
+ port: 80
+
+ credentials:
+ # example: user: 'git'; password: 'secret'
+ # run `man htpasswd` to learn about how to create .htpasswd-files
+ htpasswd: git:$apr1$O/LbLKC7$Q60GWE7OcqSEMSfe/K8xU.
+ # TODO: Create htpasswd-file on container startup instead and set user
+ # and password in values.yaml.
+ #user:
+ #password:
+
+
+gitGC:
+ image: k8sgerrit/git-gc
+
+ tolerations: []
+ nodeSelector: {}
+ affinity: {}
+ additionalPodLabels: {}
+
+ schedule: 0 6,18 * * *
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+gerritReplica:
+ images:
+ gerritInit: k8sgerrit/gerrit-init
+ gerritReplica: k8sgerrit/gerrit
+
+ tolerations: []
+ topologySpreadConstraints: {}
+ nodeSelector: {}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - gerrit-replica
+ topologyKey: "topology.kubernetes.io/zone"
+
+ replicas: 1
+ updatePartition: 0
+ additionalAnnotations: {}
+ additionalPodLabels: {}
+
+ livenessProbe:
+ initialDelaySeconds: 60
+ periodSeconds: 5
+
+ readinessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+
+ startupProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+ gracefulStopTimeout: 90
+
+  # The memory limit has to be higher than the configured heap-size for Java!
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ persistence:
+ enabled: true
+ size: 5Gi
+
+ # The general NetworkPolicy rules implemented by this chart may be too restrictive
+ # for some setups, e.g. when trying to connect to an external database. Here
+ # custom rules may be added to whitelist some additional connections.
+ networkPolicy:
+ ingress: []
+ egress: []
+
+ service:
+ additionalAnnotations: {}
+ loadBalancerSourceRanges: []
+ type: NodePort
+ externalTrafficPolicy: Cluster
+ http:
+ port: 80
+ ssh:
+ enabled: false
+ port: 29418
+
+ # `gerritReplica.keystore` expects a base64-encoded Java-keystore
+ # Since Java keystores are binary files, adding the unencoded content and
+ # automatic encoding using helm does not work here.
+ keystore:
+
+ pluginManagement:
+ plugins: []
+ # A plugin packaged in the gerrit.war-file
+ # - name: download-commands
+
+ # A plugin packaged in the gerrit.war-file that will also be installed as a
+ # lib
+ # - name: replication
+ # installAsLibrary: true
+
+ # A plugin that will be downloaded on startup
+ # - name: delete-project
+ # url: https://example.com/gerrit-plugins/delete-project.jar
+ # sha1:
+ # installAsLibrary: false
+
+ # Only downloaded plugins will be cached. This will be ignored, if no plugins
+ # are downloaded.
+ libs: []
+ cache:
+ enabled: false
+ size: 1Gi
+
+ priorityClassName:
+
+ etc:
+ # Some values are expected to have a specific value for the deployment installed
+ # by this chart to work. These are marked with `# FIXED`.
+ # Do not change them!
+ config:
+ gerrit.config: |-
+ [gerrit]
+ basePath = git # FIXED
+ serverId = gerrit-replica-1
+ # The canonical web URL has to be set to the Ingress host, if an Ingress
+ # is used. If a LoadBalancer-service is used, this should be set to the
+ # LoadBalancer's external IP. This can only be done manually after installing
+ # the chart, when you know the external IP the LoadBalancer got from the
+ # cluster.
+ canonicalWebUrl = http://example.com/
+ disableReverseDnsLookup = true
+ [index]
+ type = LUCENE
+ [index "scheduledIndexer"]
+ runOnStartup = false
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ # If using an ingress use proxy-http or proxy-https
+ listenUrl = proxy-http://*:8080/
+ requestLog = true
+ gracefulStopTimeout = 1m
+ [sshd]
+ listenAddress = *:29418
+ gracefulStopTimeout = 1m
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [cache]
+ directory = cache
+ [container]
+ user = gerrit # FIXED
+ replica = true # FIXED
+ javaHome = /usr/lib/jvm/java-11-openjdk # FIXED
+ javaOptions = -Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore # FIXED
+ javaOptions = -Xms200m
+        # Has to be lower than 'gerritReplica.resources.limits.memory'. Also
+        # consider memory used by other applications in the container.
+ javaOptions = -Xmx4g
+
+ secret:
+ secure.config: |-
+ # Password for the keystore added as value for 'gerritReplica.keystore'
+ # Only needed, if SSL is enabled.
+ #[httpd]
+ # sslKeyPassword = gerrit
+
+ # ssh_host_ecdsa_key: |-
+ # -----BEGIN EC PRIVATE KEY-----
+
+ # -----END EC PRIVATE KEY-----
+
+ # ssh_host_ecdsa_key.pub: ecdsa-sha2-nistp256...
+
+ additionalConfigMaps:
+ # - name:
+ # subDir:
+ # data:
+ # file.txt: test
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/.helmignore b/charts/k8s-gerrit/helm-charts/gerrit/.helmignore
new file mode 100644
index 0000000..4f4562f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/.helmignore
@@ -0,0 +1,24 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+docs/
+supplements/
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/Chart.yaml b/charts/k8s-gerrit/helm-charts/gerrit/Chart.yaml
new file mode 100644
index 0000000..d41771f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v2
+appVersion: 3.8.0
+description: |-
+ Gerrit is a free, web-based team code collaboration tool. Software developers
+ in a team can review each other's modifications on their source code using
+ a Web browser and approve or reject those changes. It integrates closely with
+ Git, a distributed version control system. [1]
+
+ [1](https://en.wikipedia.org/wiki/Gerrit_(software))
+name: gerrit
+version: 0.2.0
+maintainers:
+- name: Thomas Draebing
+ email: thomas.draebing@sap.com
+- name: Matthias Sohn
+ email: matthias.sohn@sap.com
+- name: Sasa Zivkov
+ email: sasa.zivkov@sap.com
+- name: Christian Halstrick
+ email: christian.halstrick@sap.com
+home: https://gerrit.googlesource.com/k8s-gerrit/+/master/helm-charts/gerrit-replica
+icon: http://commondatastorage.googleapis.com/gerrit-static/diffy-w200.png
+sources:
+- https://gerrit.googlesource.com/k8s-gerrit/+/master/
+keywords:
+- gerrit
+- git
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/LICENSE b/charts/k8s-gerrit/helm-charts/gerrit/LICENSE
new file mode 100644
index 0000000..028fc9f
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/README.md b/charts/k8s-gerrit/helm-charts/gerrit/README.md
new file mode 100644
index 0000000..110383a
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/README.md
@@ -0,0 +1,459 @@
+# Gerrit on Kubernetes
+
+Gerrit is a web-based code review tool, which acts as a Git server. This helm
+chart provides a Gerrit setup that can be deployed on Kubernetes.
+In addition, the chart provides a CronJob to perform Git garbage collection.
+
+***note
+Gerrit versions before 3.0 are no longer supported, since the support of ReviewDB
+was removed.
+***
+
+## Prerequisites
+
+- Helm (>= version 3.0)
+
+ (Check out [this guide](https://docs.helm.sh/using_helm/#quickstart-guide)
+ on how to install and use helm.)
+
+- Access to a provisioner for persistent volumes with `Read Write Many (RWM)`-
+ capability.
+
+ A list of applicable volume types can be found
+ [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).
+ This project was developed using the
+ [NFS-server-provisioner helm chart](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner),
+ a NFS-provisioner deployed in the Kubernetes cluster itself. Refer to
+ [this guide](/helm-charts/gerrit/docs/nfs-provisioner.md) of how to
+ deploy it in context of this project.
+
+- A domain name that is configured to point to the IP address of the node running
+ the Ingress controller on the kubernetes cluster (as described
+ [here](http://alesnosek.com/blog/2017/02/14/accessing-kubernetes-pods-from-outside-of-the-cluster/)).
+
+- (Optional: Required, if SSL is configured)
+ A [Java keystore](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html#httpd.sslKeyStore)
+ to be used by Gerrit.
+
+## Installing the Chart
+
+***note
+**ATTENTION:** The value for `ingress.host` is required for rendering
+the chart's templates. The nature of the value does not allow defaults.
+Thus a custom `values.yaml`-file setting this value is required!
+***
+
+To install the chart with the release name `gerrit`, execute:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit \ # release name
+ ./gerrit \ # path to chart
+ -f <path-to-custom-values>.yaml
+```
+
+The command deploys the Gerrit instance on the current Kubernetes cluster.
+The [configuration section](#Configuration) lists the parameters that can be
+configured during installation.
+
+## Configuration
+
+The following sections list the configurable values in `values.yaml`. To configure
+a Gerrit setup, make a copy of the `values.yaml`-file and change the parameters
+as needed. The configuration can be applied by installing the chart as described
+[above](#Installing-the-chart).
+
+In addition, single options can be set without creating a custom `values.yaml`:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm install \
+ gerrit \ # release name
+ ./gerrit \ # path to chart
+ --set=gitRepositoryStorage.size=100Gi
+```
+
+### Container images
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `images.busybox.registry` | The registry to pull the busybox container images from | `docker.io` |
+| `images.busybox.tag` | The busybox image tag to use | `latest` |
+| `images.registry.name` | The image registry to pull the container images from | `` |
+| `images.registry.ImagePullSecret.name` | Name of the ImagePullSecret | `image-pull-secret` (if empty no image pull secret will be deployed) |
+| `images.registry.ImagePullSecret.create` | Whether to create an ImagePullSecret | `false` |
+| `images.registry.ImagePullSecret.username` | The image registry username | `nil` |
+| `images.registry.ImagePullSecret.password` | The image registry password | `nil` |
+| `images.version` | The image version (image tag) to use | `latest` |
+| `images.imagePullPolicy` | Image pull policy | `Always` |
+| `images.additionalImagePullSecrets` | Additional image pull policies that pods should use | `[]` |
+
+### Labels
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `additionalLabels` | Additional labels for resources managed by this Helm chart | `{}` |
+
+### Storage classes
+
+For information of how a `StorageClass` is configured in Kubernetes, read the
+[official Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#introduction).
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `storageClasses.default.name` | The name of the default StorageClass (RWO) | `default` |
+| `storageClasses.default.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.default.provisioner` | Provisioner of the StorageClass | `kubernetes.io/aws-ebs` |
+| `storageClasses.default.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.default.parameters` | Parameters for the provisioner | `parameters.type: gp2`, `parameters.fsType: ext4` |
+| `storageClasses.default.mountOptions` | The mount options of the default StorageClass | `[]` |
+| `storageClasses.default.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+| `storageClasses.shared.name` | The name of the shared StorageClass (RWM) | `shared-storage` |
+| `storageClasses.shared.create` | Whether to create the StorageClass | `false` |
+| `storageClasses.shared.provisioner` | Provisioner of the StorageClass | `nfs` |
+| `storageClasses.shared.reclaimPolicy` | Whether to `Retain` or `Delete` volumes, when they become unbound | `Delete` |
+| `storageClasses.shared.parameters` | Parameters for the provisioner | `parameters.mountOptions: vers=4.1` |
+| `storageClasses.shared.mountOptions` | The mount options of the shared StorageClass | `[]` |
+| `storageClasses.shared.allowVolumeExpansion` | Whether to allow volume expansion. | `false` |
+
+### Network policies
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `networkPolicies.enabled` | Whether to enable preconfigured NetworkPolicies | `false` |
+| `networkPolicies.dnsPorts` | List of ports used by DNS-service (e.g. KubeDNS) | `[53, 8053]` |
+
+The NetworkPolicies provided here are quite strict and do not account for all
+possible scenarios. Thus, custom NetworkPolicies have to be added, e.g. for
+allowing Gerrit to replicate to a Gerrit replica. By default, the egress traffic
+of the gerrit pod is blocked, except for connections to the DNS-server.
+Thus, replication which requires Gerrit to perform git pushes to the replica will
+not work. The chart provides the possibility to define custom rules for egress-
+traffic of the gerrit pod under `gerrit.networkPolicy.egress`.
+Depending on the scenario, there are different ways to allow the required
+connections. The easiest way is to allow all egress-traffic for the gerrit
+pods:
+
+```yaml
+gerrit:
+ networkPolicy:
+ egress:
+ - {}
+```
+
+If the remote that is replicated to is running in a pod on the same cluster and
+the service-DNS is used as the remote's URL (e.g. http://gerrit-replica-git-backend-service:80/git/${name}.git),
+a podSelector (and namespaceSelector, if the pod is running in a different
+namespace) can be used to whitelist the traffic:
+
+```yaml
+gerrit:
+ networkPolicy:
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: git-backend
+```
+
+If the remote is outside the cluster, the IP of the remote or its load balancer
+can also be whitelisted, e.g.:
+
+```yaml
+gerrit:
+ networkPolicy:
+ egress:
+ - to:
+ - ipBlock:
+ cidr: xxx.xxx.0.0/16
+```
+
+The same principle also applies to other use cases, e.g. connecting to a database.
+For more information about the NetworkPolicy resource refer to the
+[Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
+
+### Workaround for NFS
+
+Kubernetes will not always be able to adapt the ownership of the files within NFS
+volumes. Thus, a workaround exists that will add init-containers to
+adapt file ownership. Note, that only the ownership of the root directory of the
+volume will be changed. All data contained within will be expected to already be
+owned by the user used by Gerrit. Also the ID-domain will be configured to ensure
+correct ID-mapping.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `nfsWorkaround.enabled` | Whether the volume used is an NFS-volume | `false` |
+| `nfsWorkaround.chownOnStartup` | Whether to chown the volume on pod startup | `false` |
+| `nfsWorkaround.idDomain` | The ID-domain that should be used to map user-/group-IDs for the NFS mount | `localdomain.com` |
+
+### Storage for Git repositories
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitRepositoryStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `gitRepositoryStorage.externalPVC.name` | Name of the external PVC | `git-repositories-pvc` |
+| `gitRepositoryStorage.size` | Size of the volume storing the Git repositories | `5Gi` |
+
+If the git repositories should be persisted even if the chart is deleted and in
+a way that the volume containing them can be mounted by the reinstalled chart,
+the PVC claiming the volume has to be created independently of the chart. To use
+the external PVC, set `gitRepositoryStorage.externalPVC.use` to `true` and
+give the name of the PVC under `gitRepositoryStorage.externalPVC.name`.
+
+### Storage for Logs
+
+The logs can be stored in a dedicated persistent volume. This volume has to be a
+read-write-many volume to be able to be used by multiple pods.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `logStorage.enabled` | Whether to enable persistence of logs | `false` |
+| `logStorage.externalPVC.use` | Whether to use a PVC deployed outside the chart | `false` |
+| `logStorage.externalPVC.name` | Name of the external PVC | `gerrit-logs-pvc` |
+| `logStorage.size` | Size of the volume | `5Gi` |
+| `logStorage.cleanup.enabled` | Whether to regularly delete old logs | `false` |
+| `logStorage.cleanup.schedule` | Cron schedule defining when to run the cleanup job | `0 0 * * *` |
+| `logStorage.cleanup.retentionDays` | Number of days to retain the logs | `14` |
+| `logStorage.cleanup.resources` | Resources the container is allowed to use | `requests.cpu: 100m` |
+| `logStorage.cleanup.additionalPodLabels` | Additional labels for pods | `{}` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+
+Each pod will create a separate folder for its logs, allowing to trace logs to
+the respective pods.
+
+### CA certificate
+
+Some application may require TLS verification. If the default CA built into the
+containers is not enough a custom CA certificate can be given to the deployment.
+Note, that Gerrit will require its CA in a JKS keystore, which is described below.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `caCert` | CA certificate for TLS verification (if not set, the default will be used) | `None` |
+
+### Ingress
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `ingress.enabled` | Whether to enable the Ingress | `false` |
+| `ingress.host` | REQUIRED: Host name to use for the Ingress (required for Ingress) | `nil` |
+| `ingress.additionalAnnotations` | Additional annotations for the Ingress | `nil` |
+| `ingress.tls.enabled` | Whether to enable TLS termination in the Ingress | `false` |
+| `ingress.tls.secret.create` | Whether to create a TLS-secret | `true` |
+| `ingress.tls.secret.name` | Name of an external secret that will be used as a TLS-secret | `nil` |
+| `ingress.tls.cert` | Public SSL server certificate | `-----BEGIN CERTIFICATE-----` |
+| `ingress.tls.key` | Private SSL server certificate | `-----BEGIN RSA PRIVATE KEY-----` |
+
+***note
+For graceful shutdown to work with an ingress, the ingress controller has to be
+configured to gracefully close the connections as well.
+***
+
+### Git garbage collection
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gitGC.image` | Image name of the Git-GC container image | `k8sgerrit/git-gc` |
+| `gitGC.schedule` | Cron-formatted schedule with which to run Git garbage collection | `0 6,18 * * *` |
+| `gitGC.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 100m` |
+| | | `requests.memory: 256Mi` |
+| | | `limits.cpu: 100m` |
+| | | `limits.memory: 256Mi` |
+| `gitGC.logging.persistence.enabled` | Whether to persist logs | `true` |
+| `gitGC.logging.persistence.size` | Storage size for persisted logs | `1Gi` |
+| `gitGC.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gitGC.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.affinity` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gitGC.additionalPodLabels` | Additional labels for Pods | `{}` |
+
+### Gerrit
+
+***note
+The way the Jetty servlet used by Gerrit works, the Gerrit component of the
+gerrit chart actually requires the URL to be known, when the chart is installed.
+The suggested way to do that is to use the provided Ingress resource. This requires
+that a URL is available and that the DNS is configured to point the URL to the
+IP of the node the Ingress controller is running on!
+***
+
+***note
+Setting the canonical web URL in the gerrit.config to the host used for the Ingress
+is mandatory, if access to Gerrit is required!
+***
+
+***note
+While the chart allows to configure multiple replica for the Gerrit StatefulSet,
+scaling of Gerrit is currently not supported, since no mechanism to guarantee a
+consistent state is currently in place. This is planned to be implemented in the
+future.
+***
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `gerrit.images.gerritInit` | Image name of the Gerrit init container image | `k8sgerrit/gerrit-init` |
+| `gerrit.images.gerrit` | Image name of the Gerrit container image | `k8sgerrit/gerrit` |
+| `gerrit.tolerations` | Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. For more information, please refer to the following documents. [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration) | [] |
+| `gerrit.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains. For more information, please refer to the following documents. [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints) | {} |
+| `gerrit.nodeSelector` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gerrit.affinity` | Assigns a Pod to the specified Nodes. For more information, please refer to the following documents. [Assign Pods to Nodes using Node Affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/). [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) | {} |
+| `gerrit.additionalAnnotations` | Additional annotations for the Pods | {} |
+| `gerrit.additionalPodLabels` | Additional labels for Pods | `{}` |
+| `gerrit.replicas` | Number of replica pods to deploy | `1` |
+| `gerrit.updatePartition` | Ordinal at which to start updating pods. Pods with a lower ordinal will not be updated. | `0` |
+| `gerrit.resources` | Configure the amount of resources the pod requests/is allowed | `requests.cpu: 1` |
+| | | `requests.memory: 5Gi` |
+| | | `limits.cpu: 1` |
+| | | `limits.memory: 6Gi` |
+| `gerrit.persistence.enabled` | Whether to persist the Gerrit site | `true` |
+| `gerrit.persistence.size` | Storage size for persisted Gerrit site | `10Gi` |
+| `gerrit.livenessProbe` | Configuration of the liveness probe timings | `{initialDelaySeconds: 30, periodSeconds: 5}` |
+| `gerrit.readinessProbe` | Configuration of the readiness probe timings | `{initialDelaySeconds: 5, periodSeconds: 1}` |
+| `gerrit.startupProbe` | Configuration of the startup probe timings | `{initialDelaySeconds: 10, periodSeconds: 5}` |
+| `gerrit.gracefulStopTimeout` | Time in seconds Kubernetes will wait until killing the pod during termination (has to be longer than Gerrit's httpd.gracefulStopTimeout to allow graceful shutdown of Gerrit) | `90` |
+| `gerrit.networkPolicy.ingress` | Custom ingress-network policy for gerrit pods | `nil` |
+| `gerrit.networkPolicy.egress` | Custom egress-network policy for gerrit pods | `nil` |
+| `gerrit.service.additionalAnnotations` | Additional annotations for the Service | `{}` |
+| `gerrit.service.loadBalancerSourceRanges` | The list of allowed IPs for the Service | `[]` |
+| `gerrit.service.type` | Which kind of Service to deploy | `NodePort` |
+| `gerrit.service.externalTrafficPolicy` | Specify how traffic from external is handled | `Cluster` |
+| `gerrit.service.http.port` | Port over which to expose HTTP | `80` |
+| `gerrit.service.ssh.enabled` | Whether to enable SSH | `false` |
+| `gerrit.service.ssh.port` | Port over which to expose SSH | `29418` |
+| `gerrit.keystore` | base64-encoded Java keystore (`cat keystore.jks \| base64`) to be used by Gerrit, when using SSL | `nil` |
+| `gerrit.index.type` | Index type used by Gerrit (either `lucene` or `elasticsearch`) | `lucene` |
+| `gerrit.pluginManagement.plugins` | List of Gerrit plugins to install | `[]` |
+| `gerrit.pluginManagement.plugins[0].name` | Name of plugin | `nil` |
+| `gerrit.pluginManagement.plugins[0].url` | Download url of plugin. If given the plugin will be downloaded, otherwise it will be installed from the gerrit.war-file. | `nil` |
+| `gerrit.pluginManagement.plugins[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version (optional) | `nil` |
+| `gerrit.pluginManagement.plugins[0].installAsLibrary` | Whether the plugin should be symlinked to the lib-dir in the Gerrit site. | `nil` |
+| `gerrit.pluginManagement.libs` | List of Gerrit library modules to install | `[]` |
+| `gerrit.pluginManagement.libs[0].name` | Name of the lib module | `nil` |
+| `gerrit.pluginManagement.libs[0].url` | Download url of lib module. | `nil` |
+| `gerrit.pluginManagement.libs[0].sha1` | SHA1 sum of plugin jar used to ensure file integrity and version | `nil` |
+| `gerrit.pluginManagement.cache.enabled` | Whether to cache downloaded plugins | `false` |
+| `gerrit.pluginManagement.cache.size` | Size of the volume used to store cached plugins | `1Gi` |
+| `gerrit.priorityClassName` | Name of the PriorityClass to apply to the master pod | `nil` |
+| `gerrit.etc.config` | Map of config files (e.g. `gerrit.config`) that will be mounted to `$GERRIT_SITE/etc` by a ConfigMap | `{gerrit.config: ..., replication.config: ...}`[see here](#Gerrit-config-files) |
+| `gerrit.etc.secret` | Map of config files (e.g. `secure.config`) that will be mounted to `$GERRIT_SITE/etc` by a Secret | `{secure.config: ...}` [see here](#Gerrit-config-files) |
+| `gerrit.additionalConfigMaps` | Allows to mount additional ConfigMaps into a subdirectory of `$SITE/data` | `[]` |
+| `gerrit.additionalConfigMaps[*].name` | Name of the ConfigMap | `nil` |
+| `gerrit.additionalConfigMaps[*].subDir` | Subdirectory under `$SITE/data` into which the files should be symlinked | `nil` |
+| `gerrit.additionalConfigMaps[*].data` | Data of the ConfigMap. If not set, ConfigMap has to be created manually | `nil` |
+
+### Gerrit config files
+
+The gerrit chart provides a ConfigMap containing the configuration files
+used by Gerrit, e.g. `gerrit.config` and a Secret containing sensitive configuration
+like the `secure.config` to configure the Gerrit installation in the Gerrit
+component. The content of the config files can be set in the `values.yaml` under
+the keys `gerrit.etc.config` and `gerrit.etc.secret` respectively.
+The key has to be the filename (e.g. `gerrit.config`) and the file's contents
+the value. This way an arbitrary number of configuration files can be loaded into
+the `$GERRIT_SITE/etc`-directory, e.g. for plugins.
+All configuration options for Gerrit are described in detail in the
+[official documentation of Gerrit](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html).
+Some options however have to be set in a specified way for Gerrit to work as
+intended with the chart:
+
+- `gerrit.basePath`
+
+ Path to the directory containing the repositories. The chart mounts this
+ directory from a persistent volume to `/var/gerrit/git` in the container. For
+ Gerrit to find the correct directory, this has to be set to `git`.
+
+- `gerrit.serverId`
+
+ In Gerrit versions higher than 2.14, Gerrit needs a server ID, which is used
+ by NoteDB. Gerrit would usually generate a random ID on startup, but since
+ the gerrit.config file is read-only when mounted as a ConfigMap, this fails.
+ Thus, the server ID has to be set manually!
+
+- `gerrit.canonicalWebUrl`
+
+ The canonical web URL has to be set to the Ingress host.
+
+- `httpd.listenURL`
+
+ This has to be set to `proxy-http://*:8080/` or `proxy-https://*:8080/`,
+ depending on whether TLS is enabled in the Ingress or not, otherwise the Jetty
+ servlet will run into an endless redirect loop.
+
+- `httpd.gracefulStopTimeout` / `sshd.gracefulStopTimeout`
+
+ To enable graceful shutdown of the embedded jetty server and SSHD, a timeout
+ has to be set with this option. This will be the maximum time, Gerrit will wait
+ for HTTP requests to finish before shutdown.
+
+- `container.user`
+
+ The technical user in the Gerrit container is called `gerrit`. Thus, this
+ value is required to be `gerrit`.
+
+- `container.javaHome`
+
+ This has to be set to `/usr/lib/jvm/java-11-openjdk-amd64`, since this is
+ the path of the Java installation in the container.
+
+- `container.javaOptions`
+
+ The maximum heap size has to be set, and its value has to be lower than the
+ memory resource limit set for the container (e.g. `-Xmx4g`). In your calculation,
+ allow memory for other components running in the container.
+
+To enable liveness- and readiness probes, the healthcheck plugin will be installed
+by default. Note, that by configuring to use a packaged or downloaded version of
+the healthcheck plugin, the configured version will take precedence over the default
+version. The plugin is by default configured to disable the `querychanges` and
+`auth` healthchecks, since these would not work on a new and empty Gerrit server.
+The default configuration can be overwritten by adding the `healthcheck.config`
+file as a key-value pair to `gerrit.etc.config` as for every other configuration.
+
+SSH keys should be configured via the helm-chart using the `gerrit.etc.secret`
+map. Gerrit will create its own keys, if none are present in the site, but if
+multiple Gerrit pods are running, each Gerrit instance would have its own keys.
+Users accessing Gerrit via a load balancer would get issues due to changing
+host keys.
+
+### Installing Gerrit plugins
+
+There are several different ways to install plugins for Gerrit:
+
+- **RECOMMENDED: Package the plugins to install into the WAR-file containing Gerrit.**
+ This method provides the most stable way to install plugins, but requires to
+ use a custom built gerrit-war file and container images, if plugins are required
+ that are not part of the official `release.war`-file.
+
+- **Download and cache plugins.** The chart supports downloading the plugin files and
+ to cache them in a separate volume, that is shared between Gerrit-pods. SHA1-
+ sums are used to validate plugin-files and versions.
+
+- **Download plugins, but do not cache them.** This should only be used during
+ development to save resources (the shared volume). Each pod will download the
+ plugin-files on its own. Pods will fail to start up, if the download-URL is
+ not valid anymore at some point in time.
+
+## Upgrading the Chart
+
+To upgrade an existing installation of the gerrit chart, e.g. to install
+a newer chart version or to use an updated custom `values.yaml`-file, execute
+the following command:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts
+helm upgrade \
+ <release-name> \
+ ./gerrit \ # path to chart
+ -f <path-to-custom-values>.yaml
+```
+
+## Uninstalling the Chart
+
+To delete the chart from the cluster, use:
+
+```sh
+helm delete <release-name>
+```
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/docs/nfs-provisioner.md b/charts/k8s-gerrit/helm-charts/gerrit/docs/nfs-provisioner.md
new file mode 100644
index 0000000..9e83d47
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/docs/nfs-provisioner.md
@@ -0,0 +1,64 @@
+# Installing a NFS-provisioner
+
+Gerrit requires access to a persistent volume capable of running in
+`Read Write Many (RWM)`-mode to store the git repositories, since the repositories
+have to be accessed by multiple pods. One possibility to provide such volumes
+is to install a provisioner for NFS-volumes into the same Kubernetes-cluster.
+This document will guide through the process.
+
+The [Kubernetes external-storage project](https://github.com/kubernetes-incubator/external-storage)
+provides an out-of-tree dynamic [provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs)
+for NFS volumes. A chart exists for easy deployment of the project onto a
+Kubernetes cluster. The chart's sources can be found [here](https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner).
+
+## Prerequisites
+
+This guide will use Helm to install the NFS-provisioner. Thus, Helm has to be
+installed.
+
+## Installing the nfs-server-provisioner chart
+
+A custom `values.yaml`-file containing a configuration tested with the
+gerrit charts can be found in the `supplements/nfs`-directory in the
+gerrit chart's root directory. In addition a file stating the tested
+version of the nfs-server-provisioner chart is present in the same directory.
+
+If needed, adapt the `values.yaml`-file for the nfs-server-provisioner chart
+further and then run:
+
+```sh
+cd $(git rev-parse --show-toplevel)/helm-charts/gerrit/supplements/nfs
+helm install nfs \
+ stable/nfs-server-provisioner \
+ -f values.yaml \
+ --version $(cat VERSION)
+```
+
+For a description of the configuration options, refer to the
+[chart's documentation](https://github.com/helm/charts/blob/master/stable/nfs-server-provisioner/README.md).
+
+Here are some tips for configuring the nfs-server-provisioner chart to work with
+the gerrit chart:
+
+- Deploying more than 1 `replica` led to some reliability issues in tests and
+ should be further tested for now, if required.
+- The name of the StorageClass created for NFS-volumes has to be the same as the
+ one defined in the gerrit chart for `storageClasses.shared.name`
+- The StorageClass for NFS-volumes needs to have the parameter `mountOptions: vers=4.1`,
+ due to compatibility [issues](https://github.com/kubernetes-incubator/external-storage/issues/223)
+ with Ganesha.
+
+## Deleting the nfs-server-provisioner chart
+
+***note
+**Attention:** Never delete the nfs-server-provisioner chart, if there is still a
+PersistentVolumeClaim and Pods using a NFS-volume provisioned by the NFS server
+provisioner. This will lead to crashed pods, that will not be terminated correctly.
+***
+
+If no Pod or PVC is using a NFS-volume provisioned by the NFS server provisioner
+anymore, delete it like any other chart:
+
+```sh
+helm delete nfs
+```
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/supplements/nfs/VERSION b/charts/k8s-gerrit/helm-charts/gerrit/supplements/nfs/VERSION
new file mode 100644
index 0000000..7dff5b8
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/supplements/nfs/VERSION
@@ -0,0 +1 @@
+0.2.1
\ No newline at end of file
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/supplements/nfs/values.yaml b/charts/k8s-gerrit/helm-charts/gerrit/supplements/nfs/values.yaml
new file mode 100644
index 0000000..a413d8a
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/supplements/nfs/values.yaml
@@ -0,0 +1,42 @@
+# Deploying more than 1 `replica` led to some reliability issues in tests and
+# should be further tested for now, if required.
+replicaCount: 1
+
+image:
+ repository: quay.io/kubernetes_incubator/nfs-provisioner
+ tag: v1.0.9
+ pullPolicy: IfNotPresent
+
+service:
+ type: ClusterIP
+ nfsPort: 2049
+ mountdPort: 20048
+ rpcbindPort: 51413
+
+persistence:
+ enabled: true
+ storageClass: default
+ accessMode: ReadWriteOnce
+ size: 7.5Gi
+
+storageClass:
+ create: true
+ defaultClass: false
+ # The name of the StorageClass has to be the same as the one defined in the
+ # gerrit chart for `storageClasses.shared.name`
+ name: shared-storage
+ parameters:
+ # Required!
+ mountOptions: vers=4.1
+ reclaimPolicy: Delete
+
+rbac:
+ create: true
+
+resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/NOTES.txt b/charts/k8s-gerrit/helm-charts/gerrit/templates/NOTES.txt
new file mode 100644
index 0000000..b71b3b0
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/NOTES.txt
@@ -0,0 +1,4 @@
+A primary Gerrit instance has been deployed.
+==================================
+
+Gerrit may be accessed under: {{ .Values.ingress.host }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/_helpers.tpl b/charts/k8s-gerrit/helm-charts/gerrit/templates/_helpers.tpl
new file mode 100644
index 0000000..bace6fe
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/_helpers.tpl
@@ -0,0 +1,20 @@
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "gerrit.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create secret to access docker registry
+*/}}
+{{- define "imagePullSecret" }}
+{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.images.registry.name (printf "%s:%s" .Values.images.registry.ImagePullSecret.username .Values.images.registry.ImagePullSecret.password | b64enc) | b64enc }}
+{{- end }}
+
+{{/*
+Add '/' to registry if needed.
+*/}}
+{{- define "registry" -}}
+{{ if .Values.images.registry.name }}{{- printf "%s/" .Values.images.registry.name -}}{{end}}
+{{- end -}}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.configmap.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.configmap.yaml
new file mode 100644
index 0000000..83c188c
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.configmap.yaml
@@ -0,0 +1,78 @@
+{{- $root := . -}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-configmap
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{- range $key, $value := .Values.gerrit.etc.config }}
+ {{ $key }}:
+{{ toYaml $value | indent 4 }}
+ {{- end }}
+ {{- if not (hasKey .Values.gerrit.etc.config "healthcheck.config") }}
+ healthcheck.config: |-
+ [healthcheck "auth"]
+ # On new instances there may be no users to use for healthchecks
+ enabled = false
+ [healthcheck "querychanges"]
+ # On new instances there won't be any changes to query
+ enabled = false
+ {{- end }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ gerrit-init.yaml: |-
+ {{ if .Values.caCert -}}
+ caCertPath: /var/config/ca.crt
+ {{- end }}
+ pluginCacheEnabled: {{ .Values.gerrit.pluginManagement.cache.enabled }}
+ pluginCacheDir: /var/mnt/plugins
+ {{- if .Values.gerrit.pluginManagement.plugins }}
+ plugins:
+{{ toYaml .Values.gerrit.pluginManagement.plugins | indent 6}}
+ {{- end }}
+ {{- if .Values.gerrit.pluginManagement.libs }}
+ libs:
+{{ toYaml .Values.gerrit.pluginManagement.libs | indent 6}}
+ {{- end }}
+{{- range .Values.gerrit.additionalConfigMaps -}}
+{{- if .data }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $root.Release.Name }}-{{ .name }}
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ $root.Release.Name }}
+ chart: {{ template "gerrit.chart" $root }}
+ heritage: {{ $root.Release.Service }}
+ release: {{ $root.Release.Name }}
+ {{- if $root.Values.additionalLabels }}
+{{ toYaml $root.Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+{{ toYaml .data | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.secrets.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.secrets.yaml
new file mode 100644
index 0000000..72cfad3
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.secrets.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-gerrit-secure-config
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ {{ if .Values.gerrit.keystore -}}
+ keystore: {{ .Values.gerrit.keystore }}
+ {{- end }}
+ {{- range $key, $value := .Values.gerrit.etc.secret }}
+ {{ $key }}: {{ $value | b64enc }}
+ {{- end }}
+type: Opaque
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.service.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.service.yaml
new file mode 100644
index 0000000..fe16d45
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.service.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Release.Name }}-gerrit-service
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.gerrit.service.additionalAnnotations }}
+ annotations:
+{{ toYaml .Values.gerrit.service.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ with .Values.gerrit.service }}
+ {{- if .loadBalancerSourceRanges -}}
+ loadBalancerSourceRanges:
+{{- range .loadBalancerSourceRanges }}
+ - {{ . | quote }}
+{{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .http.port }}
+ targetPort: 8080
+ {{- if .ssh.enabled }}
+ - name: ssh
+ port: {{ .ssh.port }}
+ targetPort: 29418
+ {{- end }}
+ type: {{ .type }}
+ externalTrafficPolicy: {{ .externalTrafficPolicy }}
+ {{- end }}
+ selector:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.stateful-set.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.stateful-set.yaml
new file mode 100644
index 0000000..a02b69e
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.stateful-set.yaml
@@ -0,0 +1,290 @@
+{{- $root := . -}}
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ .Release.Name }}-gerrit-stateful-set
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ serviceName: {{ .Release.Name }}-gerrit-service
+ replicas: {{ .Values.gerrit.replicas }}
+ updateStrategy:
+ rollingUpdate:
+ partition: {{ .Values.gerrit.updatePartition }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ {{- if .Values.gerrit.additionalPodLabels }}
+{{ toYaml .Values.gerrit.additionalPodLabels | indent 8 }}
+ {{- end }}
+ annotations:
+ chartRevision: "{{ .Release.Revision }}"
+ {{- if .Values.gerrit.additionalAnnotations }}
+{{ toYaml .Values.gerrit.additionalAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.gerrit.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.gerrit.priorityClassName }}
+ priorityClassName: {{ . }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ .Values.gerrit.gracefulStopTimeout }}
+ securityContext:
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "gerrit-replica/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ - name: gerrit-init
+ image: {{ template "registry" . }}{{ .Values.gerrit.images.gerritInit }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: gerrit-init-config
+ mountPath: "/var/config/gerrit-init.yaml"
+ subPath: gerrit-init.yaml
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- if and .Values.gerrit.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ mountPath: "/var/mnt/plugins"
+ {{- end }}
+ {{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+ - name: gerrit-index-config
+ mountPath: "/var/mnt/index"
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ subPath: ca.crt
+ mountPath: "/var/config/ca.crt"
+ {{- end }}
+ {{- range .Values.gerrit.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ containers:
+ - name: gerrit
+ image: {{ template "registry" . }}{{ .Values.gerrit.images.gerrit }}:{{ .Values.images.version }}
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/bin/ash"
+ - "-c"
+ - "kill -2 $(pidof java) && tail --pid=$(pidof java) -f /dev/null"
+ ports:
+ - name: gerrit-port
+ containerPort: 8080
+ {{- if .Values.gerrit.service.ssh.enabled }}
+ - name: gerrit-ssh
+ containerPort: 29418
+ {{- end }}
+ volumeMounts:
+ - name: gerrit-site
+ mountPath: "/var/gerrit"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ - name: logs
+ subPathExpr: "gerrit/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+ - name: gerrit-index-config
+ mountPath: "/var/mnt/index"
+ {{- end }}
+ - name: gerrit-config
+ mountPath: "/var/mnt/etc/config"
+ - name: gerrit-secure-config
+ mountPath: "/var/mnt/etc/secret"
+ {{- range .Values.gerrit.additionalConfigMaps }}
+ - name: {{ .name }}
+ mountPath: "/var/mnt/data/{{ .subDir }}"
+ {{- end }}
+ resources:
+{{ toYaml .Values.gerrit.resources | indent 10 }}
+ livenessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: gerrit-port
+{{ toYaml .Values.gerrit.livenessProbe | indent 10 }}
+ readinessProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: gerrit-port
+{{ toYaml .Values.gerrit.readinessProbe | indent 10 }}
+ startupProbe:
+ httpGet:
+ path: /config/server/healthcheck~status
+ port: gerrit-port
+{{ toYaml .Values.gerrit.startupProbe | indent 10 }}
+ volumes:
+ {{ if not .Values.gerrit.persistence.enabled -}}
+ - name: gerrit-site
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.gerrit.pluginManagement.cache.enabled }}
+ - name: gerrit-plugin-cache
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-plugin-cache-pvc
+ {{- end }}
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ - name: gerrit-init-config
+ configMap:
+ name: {{ .Release.Name }}-gerrit-init-configmap
+ {{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+ - name: gerrit-index-config
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-gerrit-index-config-pvc
+ {{- end }}
+ - name: gerrit-config
+ configMap:
+ name: {{ .Release.Name }}-gerrit-configmap
+ - name: gerrit-secure-config
+ secret:
+ secretName: {{ .Release.Name }}-gerrit-secure-config
+ {{ if .Values.caCert -}}
+ - name: tls-ca
+ secret:
+ secretName: {{ .Release.Name }}-tls-ca
+ {{- end }}
+ {{- range .Values.gerrit.additionalConfigMaps }}
+ - name: {{ .name }}
+ configMap:
+ name: {{ if .data }}{{ $root.Release.Name }}-{{ .name }}{{ else }}{{ .name }}{{ end }}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
+ {{ if .Values.gerrit.persistence.enabled -}}
+ volumeClaimTemplates:
+ - metadata:
+ name: gerrit-site
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 8 }}
+ {{- end }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.gerrit.persistence.size }}
+ storageClassName: {{ .Values.storageClasses.default.name }}
+ {{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.storage.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.storage.yaml
new file mode 100644
index 0000000..1d85fc6
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/gerrit.storage.yaml
@@ -0,0 +1,45 @@
+{{- if and .Values.gerrit.pluginManagement.cache.enabled }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-plugin-cache-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gerrit.pluginManagement.cache.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
+{{ if eq .Values.gerrit.index.type "elasticsearch" -}}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-gerrit-index-config-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 10Mi
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/git-gc.cronjob.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/git-gc.cronjob.yaml
new file mode 100644
index 0000000..8230e5d
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/git-gc.cronjob.yaml
@@ -0,0 +1,132 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-git-gc
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.gitGC.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.gitGC.additionalPodLabels }}
+{{ toYaml .Values.gitGC.additionalPodLabels | indent 12 }}
+ {{- end }}
+ annotations:
+ cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
+ spec:
+ {{- with .Values.gitGC.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.gitGC.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.gitGC.affinity }}
+ affinity:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ restartPolicy: OnFailure
+ securityContext:
+ runAsUser: 1000
+ fsGroup: 100
+ {{ if .Values.images.registry.ImagePullSecret.name -}}
+ imagePullSecrets:
+ - name: {{ .Values.images.registry.ImagePullSecret.name }}
+ {{- range .Values.images.additionalImagePullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.chownOnStartup }}
+ - name: nfs-init
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ chown 1000:100 /var/mnt/logs
+ chown 1000:100 /var/mnt/git
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/mnt/logs"
+ - name: git-repositories
+ mountPath: "/var/mnt/git"
+ {{- if .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: git-gc
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ template "registry" . }}{{ .Values.gitGC.image }}:{{ .Values.images.version }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ resources:
+{{ toYaml .Values.gitGC.resources | indent 14 }}
+ volumeMounts:
+ - name: git-repositories
+ mountPath: "/var/gerrit/git"
+ - name: logs
+ subPathExpr: "git-gc/$(POD_NAME)"
+ mountPath: "/var/log/git"
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ mountPath: "/etc/idmapd.conf"
+ subPath: idmapd.conf
+ {{- end }}
+ volumes:
+ - name: git-repositories
+ persistentVolumeClaim:
+ {{- if .Values.gitRepositoryStorage.externalPVC.use }}
+ claimName: {{ .Values.gitRepositoryStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-git-repositories-pvc
+ {{- end }}
+ - name: logs
+ {{ if .Values.logStorage.enabled -}}
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+ {{ else -}}
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain }}
+ - name: nfs-config
+ configMap:
+ name: {{ .Release.Name }}-nfs-configmap
+ {{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/git-gc.storage.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/git-gc.storage.yaml
new file mode 100644
index 0000000..c69a647
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/git-gc.storage.yaml
@@ -0,0 +1,22 @@
+{{ if .Values.gitGC.logging.persistence.enabled -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-git-gc-logs-pvc
+ labels:
+ app.kubernetes.io/component: git-gc
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.gitGC.logging.persistence.size }}
+ storageClassName: {{ .Values.storageClasses.default.name }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/global.secrets.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/global.secrets.yaml
new file mode 100644
index 0000000..b2c3d5d
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/global.secrets.yaml
@@ -0,0 +1,18 @@
+{{ if .Values.caCert -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-tls-ca
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ ca.crt: {{ .Values.caCert | b64enc }}
+type: Opaque
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/image-pull.secret.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/image-pull.secret.yaml
new file mode 100644
index 0000000..d107472
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/image-pull.secret.yaml
@@ -0,0 +1,9 @@
+{{ if and .Values.images.registry.ImagePullSecret.name .Values.images.registry.ImagePullSecret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.images.registry.ImagePullSecret.name }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ template "imagePullSecret" . }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/ingress.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/ingress.yaml
new file mode 100644
index 0000000..eb19655
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/ingress.yaml
@@ -0,0 +1,64 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ .Release.Name }}-gerrit-ingress
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.ingress.additionalLabels }}
+{{ toYaml .Values.ingress.additionalLabels | indent 4 }}
+ {{- end }}
+ annotations:
+ nginx.ingress.kubernetes.io/proxy-body-size: {{ .Values.ingress.maxBodySize | default "50m" }}
+ {{- if .Values.ingress.additionalAnnotations }}
+{{ toYaml .Values.ingress.additionalAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ {{ if .Values.ingress.tls.enabled -}}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.host }}
+ {{ if .Values.ingress.tls.secret.create -}}
+ secretName: {{ .Release.Name }}-gerrit-tls-secret
+ {{- else }}
+ secretName: {{ .Values.ingress.tls.secret.name }}
+ {{- end }}
+ {{- end }}
+ rules:
+ - host: {{required "A host URL is required for the Gerrit Ingress. Please set 'ingress.host'" .Values.ingress.host }}
+ http:
+ paths:
+ - pathType: Prefix
+ path: /
+ backend:
+ service:
+ name: {{ .Release.Name }}-gerrit-service
+ port:
+ number: {{ .Values.gerrit.service.http.port }}
+{{- end }}
+---
+{{ if and .Values.ingress.tls.enabled .Values.ingress.tls.secret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-gerrit-tls-secret
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.ingress.additionalLabels }}
+{{ toYaml .Values.ingress.additionalLabels | indent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ {{ with .Values.ingress.tls -}}
+ tls.crt: {{ .cert | b64enc }}
+ tls.key: {{ .key | b64enc }}
+ {{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/log-cleaner.cronjob.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/log-cleaner.cronjob.yaml
new file mode 100644
index 0000000..c1314f1
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/log-cleaner.cronjob.yaml
@@ -0,0 +1,65 @@
+{{- if and .Values.logStorage.enabled .Values.logStorage.cleanup.enabled }}
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ .Release.Name }}-log-cleaner
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ .Values.logStorage.cleanup.schedule | quote }}
+ concurrencyPolicy: "Forbid"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: log-cleaner
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 12 }}
+ {{- end }}
+ {{- if .Values.logStorage.cleanup.additionalPodLabels }}
+{{ toYaml .Values.logStorage.cleanup.additionalPodLabels | indent 12 }}
+ {{- end }}
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: log-cleaner
+ imagePullPolicy: {{ .Values.images.imagePullPolicy }}
+ image: {{ .Values.images.busybox.registry -}}/busybox:{{- .Values.images.busybox.tag }}
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ find /var/logs/ \
+ -mindepth 1 \
+ -type f \
+ -mtime +{{ .Values.logStorage.cleanup.retentionDays }} \
+ -print \
+ -delete
+ find /var/logs/ -type d -empty -delete
+ resources:
+{{ toYaml .Values.logStorage.cleanup.resources | indent 14 }}
+ volumeMounts:
+ - name: logs
+ mountPath: "/var/logs"
+ volumes:
+ - name: logs
+ persistentVolumeClaim:
+ {{- if .Values.logStorage.externalPVC.use }}
+ claimName: {{ .Values.logStorage.externalPVC.name }}
+ {{- else }}
+ claimName: {{ .Release.Name }}-log-pvc
+ {{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/netpol.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/netpol.yaml
new file mode 100644
index 0000000..c0cbc4d
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/netpol.yaml
@@ -0,0 +1,122 @@
+{{ if .Values.networkPolicies.enabled -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: {{ .Release.Name }}-default-deny-all
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.networkPolicies.additionalLabels }}
+{{ toYaml .Values.networkPolicies.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress: []
+ egress: []
+---
+{{ if .Values.networkPolicies.dnsPorts -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ .Release.Name }}-allow-dns-access
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.networkPolicies.additionalLabels }}
+{{ toYaml .Values.networkPolicies.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ {{ range .Values.networkPolicies.dnsPorts -}}
+ - port: {{ . }}
+ protocol: UDP
+ - port: {{ . }}
+ protocol: TCP
+ {{ end }}
+{{- end }}
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-allow-external
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+ - ports:
+ - port: 8080
+ from: []
+---
+{{ if .Values.gerrit.networkPolicy.ingress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-custom-ingress-policies
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ ingress:
+{{ toYaml .Values.gerrit.networkPolicy.ingress | indent 2 }}
+{{- end }}
+---
+{{ if .Values.gerrit.networkPolicy.egress -}}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: gerrit-custom-egress-policies
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ chart: {{ template "gerrit.chart" . }}
+ release: {{ .Release.Name }}
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ egress:
+{{ toYaml .Values.gerrit.networkPolicy.egress | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/nfs.configmap.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/nfs.configmap.yaml
new file mode 100644
index 0000000..dd2c3dd
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/nfs.configmap.yaml
@@ -0,0 +1,28 @@
+{{- if and .Values.nfsWorkaround.enabled .Values.nfsWorkaround.idDomain -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-nfs-configmap
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+data:
+ idmapd.conf: |-
+ [General]
+
+ Verbosity = 0
+ Pipefs-Directory = /run/rpc_pipefs
+ # set your own domain here, if it differs from FQDN minus hostname
+ Domain = {{ .Values.nfsWorkaround.idDomain }}
+
+ [Mapping]
+
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/storage.pvc.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/storage.pvc.yaml
new file mode 100644
index 0000000..b262402
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/storage.pvc.yaml
@@ -0,0 +1,45 @@
+{{- if not .Values.gitRepositoryStorage.externalPVC.use }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-git-repositories-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.gitRepositoryStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
+{{- if and .Values.logStorage.enabled (not .Values.logStorage.externalPVC.use) }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-log-pvc
+ labels:
+ app.kubernetes.io/component: gerrit
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: {{ .Values.logStorage.size }}
+ storageClassName: {{ .Values.storageClasses.shared.name }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/templates/storageclasses.yaml b/charts/k8s-gerrit/helm-charts/gerrit/templates/storageclasses.yaml
new file mode 100644
index 0000000..552cd6a
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/templates/storageclasses.yaml
@@ -0,0 +1,53 @@
+{{ if .Values.storageClasses.default.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.default.name }}
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.default.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.default.reclaimPolicy }}
+{{ if .Values.storageClasses.default.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.default.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+mountOptions:
+{{- range $value := .Values.storageClasses.default.mountOptions }}
+ - {{ $value }}
+{{- end }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.default.allowVolumeExpansion }}
+{{- end }}
+---
+{{ if .Values.storageClasses.shared.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClasses.shared.name }}
+ labels:
+ chart: {{ template "gerrit.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ {{- if .Values.additionalLabels }}
+{{ toYaml .Values.additionalLabels | indent 4 }}
+ {{- end }}
+provisioner: {{ .Values.storageClasses.shared.provisioner }}
+reclaimPolicy: {{ .Values.storageClasses.shared.reclaimPolicy }}
+{{ if .Values.storageClasses.shared.parameters -}}
+parameters:
+{{- range $key, $value := .Values.storageClasses.shared.parameters }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+mountOptions:
+{{- range $value := .Values.storageClasses.shared.mountOptions }}
+ - {{ $value }}
+{{- end }}
+{{- end }}
+allowVolumeExpansion: {{ .Values.storageClasses.shared.allowVolumeExpansion }}
+{{- end }}
diff --git a/charts/k8s-gerrit/helm-charts/gerrit/values.yaml b/charts/k8s-gerrit/helm-charts/gerrit/values.yaml
new file mode 100644
index 0000000..1135aa9
--- /dev/null
+++ b/charts/k8s-gerrit/helm-charts/gerrit/values.yaml
@@ -0,0 +1,333 @@
+images:
+ busybox:
+ registry: docker.io
+ tag: latest
+ # Registry used for container images created by this project
+ registry:
+ # The registry name must NOT contain a trailing slash
+ name:
+ ImagePullSecret:
+ # Leave blank, if no ImagePullSecret is needed.
+ name: image-pull-secret
+ # If set to false, the gerrit chart expects either an ImagePullSecret
+ # with the name configured above to be present on the cluster or that no
+ # credentials are needed.
+ create: false
+ username:
+ password:
+ version: latest
+ imagePullPolicy: Always
+ # Additional ImagePullSecrets that already exist and should be used by the
+ # pods of this chart. E.g. to pull busybox from dockerhub.
+ additionalImagePullSecrets: []
+
+# Additional labels that should be applied to all resources
+additionalLabels: {}
+
+storageClasses:
+ # Storage class used for storing logs and other pod-specific persisted data
+ default:
+ # If create is set to false, an existing StorageClass with the given
+ # name is expected to exist in the cluster. Setting create to true will
+ # create a storage class with the parameters given below.
+ name: default
+ create: false
+ provisioner: kubernetes.io/aws-ebs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ type: gp2
+ fsType: ext4
+ mountOptions: []
+ allowVolumeExpansion: false
+ # Storage class used for storing git repositories. Has to provide RWM access.
+ shared:
+ # If create is set to false, an existing StorageClass with RWM access
+ # mode and the given name has to be provided.
+ name: shared-storage
+ create: false
+ provisioner: nfs
+ reclaimPolicy: Delete
+ # Use the parameters key to set all parameters needed for the provisioner
+ parameters:
+ mountOptions: vers=4.1
+ mountOptions: []
+ allowVolumeExpansion: false
+
+
+nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idDomain: localdomain.com
+
+
+networkPolicies:
+ enabled: false
+ dnsPorts:
+ - 53
+ - 8053
+
+
+gitRepositoryStorage:
+ externalPVC:
+ use: false
+ name: git-repositories-pvc
+ size: 5Gi
+
+logStorage:
+ enabled: false
+ externalPVC:
+ use: false
+ name: gerrit-logs-pvc
+ size: 5Gi
+ cleanup:
+ enabled: false
+ additionalPodLabels: {}
+ schedule: "0 0 * * *"
+ retentionDays: 14
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+caCert:
+
+ingress:
+ enabled: false
+ host:
+ # The maximum body size to allow for requests. Use "0" to allow unlimited
+ # request body sizes.
+ maxBodySize: 50m
+ additionalAnnotations:
+ kubernetes.io/ingress.class: nginx
+ # nginx.ingress.kubernetes.io/server-alias: example.com
+ # nginx.ingress.kubernetes.io/whitelist-source-range: xxx.xxx.xxx.xxx
+ tls:
+ enabled: false
+ secret:
+ create: true
+ # `name` will only be used, if `create` is set to false to bind an
+ # existing secret. Otherwise the name will be automatically generated to
+ # avoid conflicts between multiple chart installations.
+ name:
+ # `cert` and `key` will only be used, if the secret will be created by
+ # this chart.
+ cert: |-
+ -----BEGIN CERTIFICATE-----
+
+ -----END CERTIFICATE-----
+ key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+
+ -----END RSA PRIVATE KEY-----
+
+
+gitGC:
+ image: k8sgerrit/git-gc
+
+ tolerations: []
+ nodeSelector: {}
+ affinity: {}
+ additionalPodLabels: {}
+
+ schedule: 0 6,18 * * *
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
+
+ logging:
+ persistence:
+ enabled: true
+ size: 1Gi
+
+
+gerrit:
+ images:
+ gerritInit: k8sgerrit/gerrit-init
+ gerrit: k8sgerrit/gerrit
+
+ tolerations: []
+ topologySpreadConstraints: {}
+ nodeSelector: {}
+ affinity: {}
+ additionalAnnotations: {}
+ additionalPodLabels: {}
+
+ replicas: 1
+ updatePartition: 0
+
+ # The memory limit has to be higher than the configured heap-size for Java!
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ persistence:
+ enabled: true
+ size: 10Gi
+
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 5
+
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 1
+
+ startupProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+ gracefulStopTimeout: 90
+
+ # The general NetworkPolicy rules implemented by this chart may be too restrictive
+ # for some setups, e.g. when trying to replicate to a Gerrit replica. Here
+ # custom rules may be added to whitelist some additional connections.
+ networkPolicy:
+ ingress: []
+ egress: []
+ # An example for an egress rule to allow replication to a Gerrit replica
+ # installed with the gerrit-replica setup in the same cluster and namespace
+ # by using the service as the replication destination
+ # (e.g. http://gerrit-replica-git-backend-service:80/git/${name}.git):
+ #
+ # - to:
+ # - podSelector:
+ # matchLabels:
+ # app: git-backend
+
+ service:
+ additionalAnnotations: {}
+ loadBalancerSourceRanges: []
+ type: NodePort
+ externalTrafficPolicy: Cluster
+ http:
+ port: 80
+ ssh:
+ enabled: false
+ port: 29418
+
+ # `gerrit.keystore` expects a base64-encoded Java-keystore
+ # Since Java keystores are binary files, adding the unencoded content and
+ # automatic encoding using helm does not work here.
+ keystore:
+
+ index:
+ # Either `lucene` or `elasticsearch`
+ type: lucene
+
+ pluginManagement:
+ plugins: []
+ # A plugin packaged in the gerrit.war-file
+ # - name: download-commands
+
+ # A plugin packaged in the gerrit.war-file that will also be installed as a
+ # lib
+ # - name: replication
+ # installAsLibrary: true
+
+ # A plugin that will be downloaded on startup
+ # - name: delete-project
+ # url: https://example.com/gerrit-plugins/delete-project.jar
+ # sha1:
+ # installAsLibrary: false
+
+ # Only downloaded plugins will be cached. This will be ignored, if no plugins
+ # are downloaded.
+ libs: []
+ cache:
+ enabled: false
+ size: 1Gi
+
+ priorityClassName:
+
+ etc:
+ # Some values are expected to have a specific value for the deployment installed
+ # by this chart to work. These are marked with `# FIXED`.
+ # Do not change them!
+ config:
+ gerrit.config: |-
+ [gerrit]
+ basePath = git # FIXED
+ serverId = gerrit-1
+ # The canonical web URL has to be set to the Ingress host, if an Ingress
+ # is used. If a LoadBalancer-service is used, this should be set to the
+ # LoadBalancer's external IP. This can only be done manually after installing
+ # the chart, when you know the external IP the LoadBalancer got from the
+ # cluster.
+ canonicalWebUrl = http://example.com/
+ disableReverseDnsLookup = true
+ [index]
+ type = LUCENE
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ # If using an ingress use proxy-http or proxy-https
+ listenUrl = proxy-http://*:8080/
+ requestLog = true
+ gracefulStopTimeout = 1m
+ [sshd]
+ listenAddress = off
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [cache]
+ directory = cache
+ [container]
+ user = gerrit # FIXED
+ javaHome = /usr/lib/jvm/java-11-openjdk # FIXED
+ javaOptions = -Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore # FIXED
+ javaOptions = -Xms200m
+ # Has to be lower than 'gerrit.resources.limits.memory'. Also
+ # consider memory used by other applications in the container.
+ javaOptions = -Xmx4g
+
+ replication.config: |-
+ [gerrit]
+ autoReload = false
+ replicateOnStartup = true
+ defaultForceUpdate = true
+
+ # [remote "replica"]
+ # url = http://gerrit-replica.example.com/git/${name}.git
+ # replicationDelay = 0
+ # timeout = 30
+
+ secret:
+ secure.config: |-
+ # Password for the keystore added as value for 'gerritReplica.keystore'
+ # Only needed, if SSL is enabled.
+ #[httpd]
+ # sslKeyPassword = gerrit
+
+ # Credentials for replication targets
+ # [remote "replica"]
+ # username = git
+ # password = secret
+
+ # ssh_host_ecdsa_key: |-
+ # -----BEGIN EC PRIVATE KEY-----
+
+ # -----END EC PRIVATE KEY-----
+
+ # ssh_host_ecdsa_key.pub: ecdsa-sha2-nistp256...
+
+ additionalConfigMaps:
+ # - name:
+ # subDir:
+ # data:
+ # file.txt: test
diff --git a/charts/k8s-gerrit/istio/README.md b/charts/k8s-gerrit/istio/README.md
new file mode 100644
index 0000000..2f03490
--- /dev/null
+++ b/charts/k8s-gerrit/istio/README.md
@@ -0,0 +1,25 @@
+# Istio for Gerrit
+
+## Configuring istio
+
+It is recommended to set a static IP to be used by the LoadBalancer service
+deployed by istio. To do that, set
+`spec.components.ingressGateways[0].k8s.overlays[0].patches[0].value`, which is
+commented out by default; leaving it commented out results in an ephemeral IP.
+
+## Installing istio
+
+Create the `istio-system`-namespace:
+
+```sh
+kubectl apply -f ./istio/istio-system-namespace.yaml
+```
+
+Verify that your istioctl version (`istioctl version`) matches the version in
+`istio/gerrit.profile.yaml` under `spec.tag`.
+
+Install istio:
+
+```sh
+istioctl install -f istio/gerrit.profile.yaml
+```
diff --git a/charts/k8s-gerrit/istio/gerrit.profile.yaml b/charts/k8s-gerrit/istio/gerrit.profile.yaml
new file mode 100644
index 0000000..d81dea6
--- /dev/null
+++ b/charts/k8s-gerrit/istio/gerrit.profile.yaml
@@ -0,0 +1,312 @@
+apiVersion: install.istio.io/v1alpha1
+kind: IstioOperator
+spec:
+ components:
+ base:
+ enabled: true
+ cni:
+ enabled: false
+ egressGateways:
+ - enabled: false
+ k8s:
+ env:
+ - name: ISTIO_META_ROUTER_MODE
+ value: standard
+ hpaSpec:
+ maxReplicas: 5
+ metrics:
+ - resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 80
+ type: Resource
+ minReplicas: 1
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: istio-egressgateway
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 1024Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ service:
+ ports:
+ - name: http2
+ port: 80
+ protocol: TCP
+ targetPort: 8080
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 8443
+ - name: tls
+ port: 15443
+ protocol: TCP
+ targetPort: 15443
+ strategy:
+ rollingUpdate:
+ maxSurge: 100%
+ maxUnavailable: 25%
+ name: istio-egressgateway
+ ingressGateways:
+ - enabled: true
+ k8s:
+ env:
+ - name: ISTIO_META_ROUTER_MODE
+ value: standard
+ hpaSpec:
+ maxReplicas: 5
+ metrics:
+ - resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 80
+ type: Resource
+ minReplicas: 5
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: istio-ingressgateway
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 1024Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ service:
+ ports:
+ - name: status-port
+ port: 15021
+ protocol: TCP
+ targetPort: 15021
+ - name: http2
+ port: 80
+ protocol: TCP
+ targetPort: 8080
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 8443
+ - name: tcp-istiod
+ port: 15012
+ protocol: TCP
+ targetPort: 15012
+ # - name: tls
+ # port: 15443
+ # protocol: TCP
+ # targetPort: 15443
+ - name: ssh
+ port: 29418
+ protocol: TCP
+ targetPort: 29418
+ strategy:
+ rollingUpdate:
+ maxSurge: 100%
+ maxUnavailable: 25%
+ overlays:
+ - kind: Service
+ name: istio-ingressgateway
+ patches:
+ - path: spec.loadBalancerIP
+ # TO_BE_CHANGED: Change IP
+ #value: xxx.xxx.xxx.xxx
+ - path: spec.loadBalancerSourceRanges
+ # TO_BE_CHANGED: Change IP-Range to whitelist
+ # value:
+ # - 0.0.0.0/32
+ - path: metadata.annotations
+ # TO_BE_CHANGED: Annotations to be set in the service, e.g. to
+ # configure automated DNS and certificate management in Gardener
+ # value:
+ # dns.gardener.cloud/dnsnames: '*.example.com'
+ # dns.gardener.cloud/class: garden
+ # dns.gardener.cloud/ttl: "600"
+ # cert.gardener.cloud/commonName: '*.example.com'
+ # cert.gardener.cloud/purpose: managed
+ # cert.gardener.cloud/secretname: tls-secret
+ name: istio-ingressgateway
+ istiodRemote:
+ enabled: false
+ pilot:
+ enabled: true
+ k8s:
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ hpaSpec:
+ minReplicas: 2
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 1
+ periodSeconds: 3
+ timeoutSeconds: 5
+ strategy:
+ rollingUpdate:
+ maxSurge: 100%
+ maxUnavailable: 50%
+ hub: docker.io/istio
+ meshConfig:
+ accessLogFile: /dev/stdout
+ defaultConfig:
+ proxyMetadata: {}
+ enablePrometheusMerge: true
+ profile: default
+ tag: 1.16.0
+ values:
+ base:
+ enableCRDTemplates: false
+ validationURL: ""
+ gateways:
+ istio-egressgateway:
+ autoscaleEnabled: true
+ env: {}
+ name: istio-egressgateway
+ secretVolumes:
+ - mountPath: /etc/istio/egressgateway-certs
+ name: egressgateway-certs
+ secretName: istio-egressgateway-certs
+ - mountPath: /etc/istio/egressgateway-ca-certs
+ name: egressgateway-ca-certs
+ secretName: istio-egressgateway-ca-certs
+ type: ClusterIP
+ istio-ingressgateway:
+ autoscaleEnabled: true
+ env: {}
+ name: istio-ingressgateway
+ secretVolumes:
+ - mountPath: /etc/istio/ingressgateway-certs
+ name: ingressgateway-certs
+ secretName: istio-ingressgateway-certs
+ - mountPath: /etc/istio/ingressgateway-ca-certs
+ name: ingressgateway-ca-certs
+ secretName: istio-ingressgateway-ca-certs
+ type: LoadBalancer
+ global:
+ configValidation: true
+ defaultNodeSelector: {}
+ defaultPodDisruptionBudget:
+ enabled: true
+ defaultResources:
+ requests:
+ cpu: 10m
+ imagePullPolicy: ""
+ imagePullSecrets: []
+ istioNamespace: istio-system
+ istiod:
+ enableAnalysis: false
+ jwtPolicy: third-party-jwt
+ logAsJson: false
+ logging:
+ level: default:info
+ meshNetworks: {}
+ mountMtlsCerts: false
+ multiCluster:
+ clusterName: ""
+ enabled: false
+ network: ""
+ omitSidecarInjectorConfigMap: false
+ oneNamespace: false
+ operatorManageWebhooks: false
+ pilotCertProvider: istiod
+ priorityClassName: ""
+ proxy:
+ autoInject: enabled
+ clusterDomain: cluster.local
+ componentLogLevel: misc:error
+ enableCoreDump: false
+ excludeIPRanges: ""
+ excludeInboundPorts: ""
+ excludeOutboundPorts: ""
+ image: proxyv2
+ includeIPRanges: '*'
+ # Use this value, if more detailed logging output is needed, e.g. for
+ # debugging.
+ logLevel: warning
+ privileged: false
+ readinessFailureThreshold: 30
+ readinessInitialDelaySeconds: 1
+ readinessPeriodSeconds: 2
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 1024Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ statusPort: 15020
+ tracer: zipkin
+ proxy_init:
+ image: proxyv2
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 1024Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+ sds:
+ token:
+ aud: istio-ca
+ sts:
+ servicePort: 0
+ tracer:
+ datadog: {}
+ lightstep: {}
+ stackdriver: {}
+ zipkin: {}
+ useMCP: false
+ istiodRemote:
+ injectionURL: ""
+ pilot:
+ autoscaleEnabled: true
+ autoscaleMax: 5
+ autoscaleMin: 2
+ configMap: true
+ cpu:
+ targetAverageUtilization: 80
+ enableProtocolSniffingForInbound: true
+ enableProtocolSniffingForOutbound: true
+ env: {}
+ image: pilot
+ keepaliveMaxServerConnectionAge: 24h
+ nodeSelector: {}
+ podLabels: {}
+ replicaCount: 1
+ traceSampling: 1
+ sidecarInjectorWebhook:
+ enableNamespacesByDefault: false
+ objectSelector:
+ autoInject: true
+ enabled: false
+ rewriteAppHTTPProbe: true
+ telemetry:
+ enabled: true
+ v2:
+ enabled: true
+ metadataExchange:
+ wasmEnabled: false
+ prometheus:
+ enabled: true
+ wasmEnabled: false
+ stackdriver:
+ configOverride: {}
+ enabled: false
+ logging: false
+ monitoring: false
+ topology: false
diff --git a/charts/k8s-gerrit/istio/istio-system-namespace.yaml b/charts/k8s-gerrit/istio/istio-system-namespace.yaml
new file mode 100644
index 0000000..f394e91
--- /dev/null
+++ b/charts/k8s-gerrit/istio/istio-system-namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: istio-system
diff --git a/charts/k8s-gerrit/istio/namespace.yaml b/charts/k8s-gerrit/istio/namespace.yaml
new file mode 100644
index 0000000..6e9fb38
--- /dev/null
+++ b/charts/k8s-gerrit/istio/namespace.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: gerrit-replica
+ labels:
+ istio-injection: enabled
diff --git a/charts/k8s-gerrit/operator/.gitignore b/charts/k8s-gerrit/operator/.gitignore
new file mode 100644
index 0000000..7021fd2
--- /dev/null
+++ b/charts/k8s-gerrit/operator/.gitignore
@@ -0,0 +1,4 @@
+.classpath
+.project
+.settings/
+/target/
diff --git a/charts/k8s-gerrit/operator/README.md b/charts/k8s-gerrit/operator/README.md
new file mode 100644
index 0000000..40db2fd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/README.md
@@ -0,0 +1,4 @@
+# Gerrit Operator
+
+Detailed documentation about the Gerrit operator can be found
+[here](../Documentation/operator.md).
diff --git a/charts/k8s-gerrit/operator/k8s/operator/namespace.yaml b/charts/k8s-gerrit/operator/k8s/operator/namespace.yaml
new file mode 100644
index 0000000..9ce8374
--- /dev/null
+++ b/charts/k8s-gerrit/operator/k8s/operator/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: gerrit-operator
diff --git a/charts/k8s-gerrit/operator/k8s/operator/operator.yaml b/charts/k8s-gerrit/operator/k8s/operator/operator.yaml
new file mode 100644
index 0000000..02c6ef6
--- /dev/null
+++ b/charts/k8s-gerrit/operator/k8s/operator/operator.yaml
@@ -0,0 +1,63 @@
+## Required to use an external/persistent keystore, otherwise a keystore using
+## self-signed certificates will be generated
+# ---
+# apiVersion: v1
+# kind: Secret
+# metadata:
+# name: gerrit-operator-ssl
+# namespace: gerrit-operator
+# data:
+# keystore.jks: # base64-encoded Java keystore
+# keystore.password: # base64-encoded Java keystore password
+# type: Opaque
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: gerrit-operator
+ namespace: gerrit-operator
+spec:
+ selector:
+ matchLabels:
+ app: gerrit-operator
+ template:
+ metadata:
+ labels:
+ app: gerrit-operator
+ spec:
+ serviceAccountName: gerrit-operator
+ containers:
+ - name: operator
+ image: k8sgerrit/gerrit-operator
+ imagePullPolicy: Always
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: INGRESS
+ value: none
+ ports:
+ - containerPort: 80
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTPS
+ initialDelaySeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ ## Only required, if an external/persistent keystore is being used.
+ # volumeMounts:
+ # - name: ssl
+ # readOnly: true
+ # mountPath: /operator
+ # volumes:
+ # - name: ssl
+ # secret:
+ # secretName: gerrit-operator-ssl
diff --git a/charts/k8s-gerrit/operator/k8s/operator/rbac.yaml b/charts/k8s-gerrit/operator/k8s/operator/rbac.yaml
new file mode 100644
index 0000000..201cce7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/k8s/operator/rbac.yaml
@@ -0,0 +1,87 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: gerrit-operator
+ namespace: gerrit-operator
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: gerrit-operator-admin
+subjects:
+- kind: ServiceAccount
+ name: gerrit-operator
+ namespace: gerrit-operator
+roleRef:
+ kind: ClusterRole
+ name: gerrit-operator
+  apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: gerrit-operator
+rules:
+- apiGroups:
+ - "batch"
+ resources:
+ - cronjobs
+ verbs:
+ - '*'
+- apiGroups:
+ - "apps"
+ resources:
+ - statefulsets
+ - deployments
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - persistentvolumeclaims
+ - secrets
+ - services
+ verbs:
+ - '*'
+- apiGroups:
+ - "storage.k8s.io"
+ resources:
+ - storageclasses
+ verbs:
+ - 'get'
+ - 'list'
+- apiGroups:
+ - "apiextensions.k8s.io"
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - '*'
+- apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - ingresses
+ verbs:
+ - '*'
+- apiGroups:
+ - "gerritoperator.google.com"
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - "networking.istio.io"
+ resources:
+ - "gateways"
+ - "virtualservices"
+ - "destinationrules"
+ verbs:
+ - '*'
+- apiGroups:
+ - "admissionregistration.k8s.io"
+ resources:
+ - 'validatingwebhookconfigurations'
+ verbs:
+ - '*'
diff --git a/charts/k8s-gerrit/operator/k8s/resources/rbac.yaml b/charts/k8s-gerrit/operator/k8s/resources/rbac.yaml
new file mode 100644
index 0000000..a31f4a7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/k8s/resources/rbac.yaml
@@ -0,0 +1,30 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: gerrit
+  namespace: gerrit # CHANGE: Change it to the namespace running Gerrit
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gerrit
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get", "list"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: gerrit
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: gerrit
+subjects:
+- kind: ServiceAccount
+ name: gerrit
+  namespace: gerrit # CHANGE: Change it to the namespace running Gerrit
diff --git a/charts/k8s-gerrit/operator/pom.xml b/charts/k8s-gerrit/operator/pom.xml
new file mode 100644
index 0000000..890c400
--- /dev/null
+++ b/charts/k8s-gerrit/operator/pom.xml
@@ -0,0 +1,398 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>com.google.gerrit.operator</groupId>
+ <artifactId>operator</artifactId>
+ <version>${revision}</version>
+
+ <name>Gerrit Kubernetes Operator</name>
+ <description>Provisions and operates Gerrit instances in Kubernetes</description>
+ <packaging>jar</packaging>
+
+ <properties>
+ <revision>1.0.0-SNAPSHOT</revision>
+
+ <fabric8.version>6.6.2</fabric8.version>
+ <flogger.version>0.7.4</flogger.version>
+ <guice.version>5.1.0</guice.version>
+ <javaoperatorsdk.version>4.3.3</javaoperatorsdk.version>
+ <jetty.version>11.0.15</jetty.version>
+ <lombok.version>1.18.28</lombok.version>
+ <maven.compiler.source>11</maven.compiler.source>
+ <maven.compiler.target>11</maven.compiler.target>
+ <docker.registry>docker.io</docker.registry>
+ <docker.org>k8sgerrit</docker.org>
+
+ <test.docker.registry>docker.io</test.docker.registry>
+ <test.docker.org>k8sgerritdev</test.docker.org>
+ </properties>
+
+ <profiles>
+ <profile>
+ <id>publish</id>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.google.cloud.tools</groupId>
+ <artifactId>jib-maven-plugin</artifactId>
+ <version>3.3.1</version>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>build</goal>
+ </goals>
+ <configuration>
+ <container>
+ <mainClass>com.google.gerrit.k8s.operator.Main</mainClass>
+ </container>
+ <containerizingMode>packaged</containerizingMode>
+ <from>
+ <image>gcr.io/distroless/java:11</image>
+ </from>
+ <to>
+ <image>${docker.registry}/${docker.org}/gerrit-operator</image>
+ <tags>
+ <tag>${project.version}</tag>
+ </tags>
+ </to>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ <profile>
+ <id>integration-test</id>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>properties-maven-plugin</artifactId>
+ <version>1.1.0</version>
+ <executions>
+ <execution>
+ <phase>initialize</phase>
+ <goals>
+ <goal>read-project-properties</goal>
+ </goals>
+ <configuration>
+ <files>
+ <file>${basedir}/test.properties</file>
+ </files>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>com.google.cloud.tools</groupId>
+ <artifactId>jib-maven-plugin</artifactId>
+ <version>3.3.1</version>
+ <executions>
+ <execution>
+ <phase>pre-integration-test</phase>
+ <goals>
+ <goal>build</goal>
+ </goals>
+ <configuration>
+ <container>
+ <mainClass>com.google.gerrit.k8s.operator.Main</mainClass>
+ </container>
+ <containerizingMode>packaged</containerizingMode>
+ <from>
+ <image>gcr.io/distroless/java:11</image>
+ </from>
+ <to>
+ <image>
+ ${test.docker.registry}/${test.docker.org}/gerrit-operator</image>
+ <tags>
+ <tag>${project.version}</tag>
+ </tags>
+ </to>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <version>2.22.2</version>
+ <executions>
+ <execution>
+ <phase>integration-test</phase>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <includes>
+ <include>**/*E2E.java</include>
+ </includes>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+ <dependencies>
+ <dependency>
+ <groupId>io.javaoperatorsdk</groupId>
+ <artifactId>operator-framework</artifactId>
+ <version>${javaoperatorsdk.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.javaoperatorsdk</groupId>
+ <artifactId>micrometer-support</artifactId>
+ <version>${javaoperatorsdk.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.javaoperatorsdk</groupId>
+ <artifactId>operator-framework-junit-5</artifactId>
+ <version>${javaoperatorsdk.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>io.fabric8</groupId>
+ <artifactId>kubernetes-client</artifactId>
+ <version>${fabric8.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.fabric8</groupId>
+ <artifactId>istio-client</artifactId>
+ <version>${fabric8.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.fabric8</groupId>
+ <artifactId>crd-generator-apt</artifactId>
+ <version>${fabric8.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>io.fabric8</groupId>
+ <artifactId>generator-annotations</artifactId>
+ <version>${fabric8.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.projectlombok</groupId>
+ <artifactId>lombok</artifactId>
+ <scope>provided</scope>
+ <version>${lombok.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-server</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlet</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.flogger</groupId>
+ <artifactId>flogger</artifactId>
+ <version>${flogger.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.flogger</groupId>
+ <artifactId>flogger-log4j2-backend</artifactId>
+ <version>${flogger.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.inject</groupId>
+ <artifactId>guice</artifactId>
+ <version>${guice.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.inject.extensions</groupId>
+ <artifactId>guice-assistedinject</artifactId>
+ <version>${guice.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ <version>2.19.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jgit</groupId>
+ <artifactId>org.eclipse.jgit</artifactId>
+ <version>6.5.0.202303070854-r</version>
+ </dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcpkix-jdk18on</artifactId>
+ <version>1.73</version>
+ </dependency>
+ <dependency>
+ <groupId>com.urswolfer.gerrit.client.rest</groupId>
+ <artifactId>gerrit-rest-java-client</artifactId>
+ <version>0.9.5</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>4.8.0</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>io.fabric8</groupId>
+ <artifactId>kubernetes-server-mock</artifactId>
+ <version>${fabric8.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.google.truth</groupId>
+ <artifactId>truth</artifactId>
+ <version>0.32</version>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-params</artifactId>
+ <version>5.9.2</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>io.fabric8</groupId>
+ <artifactId>java-generator-maven-plugin</artifactId>
+ <version>${fabric8.version}</version>
+ <configuration>
+ <source>${project.basedir}/src/main/resources/crd/emissary-crds.yaml</source>
+ <!-- Generate sundrio @Buildable annotations that generate Builder classes-->
+ <extraAnnotations>true</extraAnnotations>
+ </configuration>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>com.spotify.fmt</groupId>
+ <artifactId>fmt-maven-plugin</artifactId>
+ <version>2.19</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>format</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>3.2.2</version>
+ <configuration>
+ <archive>
+ <manifest>
+ <mainClass>com.google.gerrit.k8s.operator.Main</mainClass>
+ <addDefaultImplementationEntries>
+ true
+ </addDefaultImplementationEntries>
+ </manifest>
+ </archive>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>com.google.cloud.tools</groupId>
+ <artifactId>jib-maven-plugin</artifactId>
+ <version>3.3.1</version>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>dockerBuild</goal>
+ </goals>
+ <configuration>
+ <container>
+ <mainClass>com.google.gerrit.k8s.operator.Main</mainClass>
+ </container>
+ <containerizingMode>packaged</containerizingMode>
+ <from>
+ <image>gcr.io/distroless/java:11</image>
+ </from>
+ <to>
+ <image>gerrit-operator</image>
+ <tags>${revision}</tags>
+ </to>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>3.3.1</version>
+ <executions>
+ <execution>
+ <id>copy-crds</id>
+ <phase>package</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>../helm-charts/gerrit-operator-crds/templates</outputDirectory>
+ <resources>
+ <resource>
+ <directory>target/classes/META-INF/fabric8</directory>
+ <includes>
+ <include>*-v1.yml</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.10.0</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.22.2</version>
+ <configuration>
+ <includes>
+ <include>**/*Test.java</include>
+ </includes>
+ <rerunFailingTestsCount>1</rerunFailingTestsCount>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>3.4.0</version>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/annotations/</source>
+ <source>${project.build.directory}/generated-sources/java/</source>
+ <source>${project.build.directory}/generated-test-sources/java/</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/Constants.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/Constants.java
new file mode 100644
index 0000000..d2fb405
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/Constants.java
@@ -0,0 +1,23 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator;
+
+import com.google.inject.AbstractModule;
+
+public class Constants extends AbstractModule {
+ public static final String[] VERSIONS = new String[] {"v1alpha"};
+ public static final String[] CUSTOM_RESOURCES =
+ new String[] {"GerritCluster", "Gerrit", "Receiver", "GerritNetwork", "GitGarbageCollection"};
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/EnvModule.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/EnvModule.java
new file mode 100644
index 0000000..1f5b04c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/EnvModule.java
@@ -0,0 +1,35 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator;
+
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.inject.AbstractModule;
+import com.google.inject.name.Names;
+
+public class EnvModule extends AbstractModule {
+ @Override
+ protected void configure() {
+ bind(String.class)
+ .annotatedWith(Names.named("Namespace"))
+ .toInstance(System.getenv("NAMESPACE"));
+
+ String ingressTypeEnv = System.getenv("INGRESS");
+ IngressType ingressType =
+ ingressTypeEnv == null
+ ? IngressType.NONE
+ : IngressType.valueOf(ingressTypeEnv.toUpperCase());
+ bind(IngressType.class).annotatedWith(Names.named("IngressType")).toInstance(ingressType);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/GerritOperator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/GerritOperator.java
new file mode 100644
index 0000000..6f9be7e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/GerritOperator.java
@@ -0,0 +1,111 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator;
+
+import static com.google.gerrit.k8s.operator.server.HttpServer.PORT;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import com.google.inject.name.Named;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.fabric8.kubernetes.api.model.ServicePort;
+import io.fabric8.kubernetes.api.model.ServicePortBuilder;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.javaoperatorsdk.operator.Operator;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import java.util.Map;
+import java.util.Set;
+
+@Singleton
+public class GerritOperator {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ public static final String SERVICE_NAME = "gerrit-operator";
+ public static final int SERVICE_PORT = 8080;
+
+ private final KubernetesClient client;
+ private final LifecycleManager lifecycleManager;
+
+ @SuppressWarnings("rawtypes")
+ private final Set<Reconciler> reconcilers;
+
+ private final String namespace;
+
+ private Operator operator;
+ private Service svc;
+
+ @Inject
+ @SuppressWarnings("rawtypes")
+ public GerritOperator(
+ LifecycleManager lifecycleManager,
+ KubernetesClient client,
+ Set<Reconciler> reconcilers,
+ @Named("Namespace") String namespace) {
+ this.lifecycleManager = lifecycleManager;
+ this.client = client;
+ this.reconcilers = reconcilers;
+ this.namespace = namespace;
+ }
+
+ public void start() throws Exception {
+ operator = new Operator(client);
+ for (Reconciler<?> reconciler : reconcilers) {
+ logger.atInfo().log(
+ String.format("Registering reconciler: %s", reconciler.getClass().getSimpleName()));
+ operator.register(reconciler);
+ }
+ operator.start();
+ lifecycleManager.addShutdownHook(
+ new Runnable() {
+ @Override
+ public void run() {
+ shutdown();
+ }
+ });
+ applyService();
+ }
+
+ public void shutdown() {
+ client.resource(svc).delete();
+ operator.stop();
+ }
+
+ private void applyService() {
+ ServicePort port =
+ new ServicePortBuilder()
+ .withName("http")
+ .withPort(SERVICE_PORT)
+ .withNewTargetPort(PORT)
+ .withProtocol("TCP")
+ .build();
+ svc =
+ new ServiceBuilder()
+ .withApiVersion("v1")
+ .withNewMetadata()
+ .withName(SERVICE_NAME)
+ .withNamespace(namespace)
+ .endMetadata()
+ .withNewSpec()
+ .withType("ClusterIP")
+ .withPorts(port)
+ .withSelector(Map.of("app", "gerrit-operator"))
+ .endSpec()
+ .build();
+
+ logger.atInfo().log(String.format("Applying Service for Gerrit Operator: %s", svc.toString()));
+ client.resource(svc).createOrReplace();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/LifecycleManager.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/LifecycleManager.java
new file mode 100644
index 0000000..8e556a7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/LifecycleManager.java
@@ -0,0 +1,39 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator;
+
+import com.google.common.collect.Lists;
+import com.google.inject.Singleton;
+import java.util.ArrayList;
+import java.util.List;
+
+@Singleton
+public class LifecycleManager {
+ private List<Runnable> shutdownHooks = new ArrayList<>();
+
+ public LifecycleManager() {
+ Runtime.getRuntime().addShutdownHook(new Thread(this::executeShutdownHooks));
+ }
+
+ public void addShutdownHook(Runnable hook) {
+ shutdownHooks.add(hook);
+ }
+
+ private void executeShutdownHooks() {
+ for (Runnable hook : Lists.reverse(shutdownHooks)) {
+ hook.run();
+ }
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/Main.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/Main.java
new file mode 100644
index 0000000..8fc1428
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/Main.java
@@ -0,0 +1,31 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator;
+
+import com.google.gerrit.k8s.operator.admission.ValidationWebhookConfigs;
+import com.google.gerrit.k8s.operator.server.HttpServer;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Stage;
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ Injector injector = Guice.createInjector(Stage.PRODUCTION, new OperatorModule());
+ injector.getInstance(HttpServer.class).start();
+ injector.getInstance(ValidationWebhookConfigs.class).apply();
+ injector.getInstance(GerritOperator.class).start();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/OperatorModule.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/OperatorModule.java
new file mode 100644
index 0000000..3e61528
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/OperatorModule.java
@@ -0,0 +1,57 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator;
+
+import com.google.gerrit.k8s.operator.admission.AdmissionWebhookModule;
+import com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler;
+import com.google.gerrit.k8s.operator.gerrit.GerritReconciler;
+import com.google.gerrit.k8s.operator.gitgc.GitGarbageCollectionReconciler;
+import com.google.gerrit.k8s.operator.network.GerritNetworkReconcilerProvider;
+import com.google.gerrit.k8s.operator.receiver.ReceiverReconciler;
+import com.google.gerrit.k8s.operator.server.ServerModule;
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import io.fabric8.kubernetes.client.Config;
+import io.fabric8.kubernetes.client.ConfigBuilder;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClientBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+
+public class OperatorModule extends AbstractModule {
+ @SuppressWarnings("rawtypes")
+ @Override
+ protected void configure() {
+ install(new EnvModule());
+ install(new ServerModule());
+
+ bind(KubernetesClient.class).toInstance(getKubernetesClient());
+ bind(LifecycleManager.class);
+ bind(GerritOperator.class);
+
+ install(new AdmissionWebhookModule());
+
+ Multibinder<Reconciler> reconcilers = Multibinder.newSetBinder(binder(), Reconciler.class);
+ reconcilers.addBinding().to(GerritClusterReconciler.class);
+ reconcilers.addBinding().to(GerritReconciler.class);
+ reconcilers.addBinding().to(GitGarbageCollectionReconciler.class);
+ reconcilers.addBinding().to(ReceiverReconciler.class);
+ reconcilers.addBinding().toProvider(GerritNetworkReconcilerProvider.class);
+ }
+
+ private KubernetesClient getKubernetesClient() {
+ Config config = new ConfigBuilder().withNamespace(null).build();
+ return new KubernetesClientBuilder().withConfig(config).build();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/AdmissionWebhookModule.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/AdmissionWebhookModule.java
new file mode 100644
index 0000000..d3d4841
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/AdmissionWebhookModule.java
@@ -0,0 +1,29 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.admission;
+
+import com.google.gerrit.k8s.operator.v1alpha.admission.GerritClusterValidationWebhookConfigApplier;
+import com.google.gerrit.k8s.operator.v1alpha.admission.GerritValidationWebhookConfigApplier;
+import com.google.gerrit.k8s.operator.v1alpha.admission.GitGcValidationWebhookConfigApplier;
+import com.google.inject.AbstractModule;
+import com.google.inject.assistedinject.FactoryModuleBuilder;
+
+public class AdmissionWebhookModule extends AbstractModule {
+ public void configure() {
+ install(new FactoryModuleBuilder().build(ValidationWebhookConfigApplier.Factory.class));
+
+ bind(ValidationWebhookConfigs.class);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/ValidationWebhookConfigApplier.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/ValidationWebhookConfigApplier.java
new file mode 100644
index 0000000..443347e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/ValidationWebhookConfigApplier.java
@@ -0,0 +1,144 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.admission;
+
+import static com.google.gerrit.k8s.operator.GerritOperator.SERVICE_NAME;
+import static com.google.gerrit.k8s.operator.GerritOperator.SERVICE_PORT;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.server.KeyStoreProvider;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.name.Named;
+import io.fabric8.kubernetes.api.model.admissionregistration.v1.RuleWithOperations;
+import io.fabric8.kubernetes.api.model.admissionregistration.v1.RuleWithOperationsBuilder;
+import io.fabric8.kubernetes.api.model.admissionregistration.v1.ValidatingWebhook;
+import io.fabric8.kubernetes.api.model.admissionregistration.v1.ValidatingWebhookBuilder;
+import io.fabric8.kubernetes.api.model.admissionregistration.v1.ValidatingWebhookConfiguration;
+import io.fabric8.kubernetes.api.model.admissionregistration.v1.ValidatingWebhookConfigurationBuilder;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import java.io.IOException;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.cert.CertificateEncodingException;
+import java.security.cert.CertificateException;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.List;
+
+public class ValidationWebhookConfigApplier {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ private final KubernetesClient client;
+ private final String namespace;
+ private final KeyStoreProvider keyStoreProvider;
+ private final ValidatingWebhookConfiguration cfg;
+ private final String customResourceName;
+ private final String[] customResourceVersions;
+
+ public interface Factory {
+ ValidationWebhookConfigApplier create(
+ String customResourceName, String[] customResourceVersions);
+ }
+
+ @AssistedInject
+ ValidationWebhookConfigApplier(
+ KubernetesClient client,
+ @Named("Namespace") String namespace,
+ KeyStoreProvider keyStoreProvider,
+ @Assisted String customResourceName,
+ @Assisted String[] customResourceVersions) {
+ this.client = client;
+ this.namespace = namespace;
+ this.keyStoreProvider = keyStoreProvider;
+ this.customResourceName = customResourceName;
+ this.customResourceVersions = customResourceVersions;
+
+ this.cfg = build();
+ }
+
+ public List<RuleWithOperations> rules(String version) {
+ return List.of(
+ new RuleWithOperationsBuilder()
+ .withApiGroups("gerritoperator.google.com")
+ .withApiVersions(version)
+ .withOperations("CREATE", "UPDATE")
+ .withResources(customResourceName)
+ .withScope("*")
+ .build());
+ }
+
+ public List<ValidatingWebhook> webhooks()
+ throws CertificateEncodingException, KeyStoreException, NoSuchAlgorithmException,
+ CertificateException, IOException {
+ List<ValidatingWebhook> webhooks = new ArrayList<>();
+ for (String version : customResourceVersions) {
+ webhooks.add(
+ new ValidatingWebhookBuilder()
+ .withName(customResourceName.toLowerCase() + "." + version + ".validator.google.com")
+ .withAdmissionReviewVersions("v1", "v1beta1")
+ .withNewClientConfig()
+ .withCaBundle(caBundle())
+ .withNewService()
+ .withName(SERVICE_NAME)
+ .withNamespace(namespace)
+ .withPath(
+ String.format("/admission/%s/%s", version, customResourceName).toLowerCase())
+ .withPort(SERVICE_PORT)
+ .endService()
+ .endClientConfig()
+ .withFailurePolicy("Fail")
+ .withMatchPolicy("Equivalent")
+ .withRules(rules(version))
+ .withTimeoutSeconds(10)
+ .withSideEffects("None")
+ .build());
+ }
+ return webhooks;
+ }
+
+ private String caBundle()
+ throws CertificateEncodingException, KeyStoreException, NoSuchAlgorithmException,
+ CertificateException, IOException {
+ return Base64.getEncoder().encodeToString(keyStoreProvider.getCertificate().getBytes());
+ }
+
+ public ValidatingWebhookConfiguration build() {
+ try {
+ return new ValidatingWebhookConfigurationBuilder()
+ .withNewMetadata()
+ .withName(customResourceName.toLowerCase())
+ .endMetadata()
+ .withWebhooks(webhooks())
+ .build();
+ } catch (CertificateException | IOException | KeyStoreException | NoSuchAlgorithmException e) {
+ throw new RuntimeException(
+ "Failed to deploy ValidationWebhookConfiguration " + customResourceName, e);
+ }
+ }
+
+ public void apply()
+ throws KeyStoreException, NoSuchProviderException, IOException, NoSuchAlgorithmException,
+ CertificateException {
+ logger.atInfo().log("Applying webhook config %s", cfg);
+ client.resource(cfg).createOrReplace();
+ }
+
+ public void delete() {
+ logger.atInfo().log("Deleting webhook config %s", cfg);
+ client.resource(cfg).delete();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/ValidationWebhookConfigs.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/ValidationWebhookConfigs.java
new file mode 100644
index 0000000..901d15f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/admission/ValidationWebhookConfigs.java
@@ -0,0 +1,61 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.admission;
+
+import static com.google.gerrit.k8s.operator.Constants.CUSTOM_RESOURCES;
+import static com.google.gerrit.k8s.operator.Constants.VERSIONS;
+
+import com.google.gerrit.k8s.operator.LifecycleManager;
+import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Aggregates one {@link ValidationWebhookConfigApplier} per custom resource and applies or
+ * deletes all of them as a group.
+ */
+public class ValidationWebhookConfigs {
+
+  private final List<ValidationWebhookConfigApplier> configAppliers;
+
+  @Inject
+  public ValidationWebhookConfigs(
+      LifecycleManager lifecycleManager,
+      ValidationWebhookConfigApplier.Factory configApplierFactory) {
+    this.configAppliers = new ArrayList<>();
+
+    // One applier per custom resource; each applier covers all supported versions.
+    for (String customResourceName : CUSTOM_RESOURCES) {
+      this.configAppliers.add(configApplierFactory.create(customResourceName, VERSIONS));
+    }
+
+    // Clean up the webhook configurations when the operator shuts down.
+    lifecycleManager.addShutdownHook(this::delete);
+  }
+
+  /** Applies all validating webhook configurations to the cluster. */
+  public void apply() throws Exception {
+    for (ValidationWebhookConfigApplier applier : configAppliers) {
+      applier.apply();
+    }
+  }
+
+  /** Deletes all validating webhook configurations from the cluster. */
+  public void delete() {
+    for (ValidationWebhookConfigApplier applier : configAppliers) {
+      applier.delete();
+    }
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/GerritClusterReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/GerritClusterReconciler.java
new file mode 100644
index 0000000..c00d51e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/GerritClusterReconciler.java
@@ -0,0 +1,155 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster;
+
+import static com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler.CLUSTER_MANAGED_GERRIT_EVENT_SOURCE;
+import static com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler.CLUSTER_MANAGED_GERRIT_NETWORK_EVENT_SOURCE;
+import static com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler.CLUSTER_MANAGED_RECEIVER_EVENT_SOURCE;
+import static com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler.CM_EVENT_SOURCE;
+import static com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler.PVC_EVENT_SOURCE;
+
+import com.google.gerrit.k8s.operator.cluster.dependent.ClusterManagedGerrit;
+import com.google.gerrit.k8s.operator.cluster.dependent.ClusterManagedGerritCondition;
+import com.google.gerrit.k8s.operator.cluster.dependent.ClusterManagedGerritNetwork;
+import com.google.gerrit.k8s.operator.cluster.dependent.ClusterManagedGerritNetworkCondition;
+import com.google.gerrit.k8s.operator.cluster.dependent.ClusterManagedReceiver;
+import com.google.gerrit.k8s.operator.cluster.dependent.ClusterManagedReceiverCondition;
+import com.google.gerrit.k8s.operator.cluster.dependent.NfsIdmapdConfigMap;
+import com.google.gerrit.k8s.operator.cluster.dependent.NfsWorkaroundCondition;
+import com.google.gerrit.k8s.operator.cluster.dependent.SharedPVC;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritClusterStatus;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplate;
+import com.google.inject.Singleton;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Reconciler for the GerritCluster custom resource. It manages the shared PVC, the optional NFS
+ * idmapd ConfigMap and the Gerrit, Receiver and GerritNetwork resources declared in the cluster
+ * spec, and publishes the names of the managed members in the cluster status.
+ */
+@Singleton
+@ControllerConfiguration(
+    dependents = {
+      @Dependent(
+          name = "shared-pvc",
+          type = SharedPVC.class,
+          useEventSourceWithName = PVC_EVENT_SOURCE),
+      @Dependent(
+          type = NfsIdmapdConfigMap.class,
+          reconcilePrecondition = NfsWorkaroundCondition.class,
+          useEventSourceWithName = CM_EVENT_SOURCE),
+      @Dependent(
+          name = "gerrits",
+          type = ClusterManagedGerrit.class,
+          reconcilePrecondition = ClusterManagedGerritCondition.class,
+          useEventSourceWithName = CLUSTER_MANAGED_GERRIT_EVENT_SOURCE),
+      @Dependent(
+          name = "receiver",
+          type = ClusterManagedReceiver.class,
+          reconcilePrecondition = ClusterManagedReceiverCondition.class,
+          useEventSourceWithName = CLUSTER_MANAGED_RECEIVER_EVENT_SOURCE),
+      @Dependent(
+          type = ClusterManagedGerritNetwork.class,
+          reconcilePrecondition = ClusterManagedGerritNetworkCondition.class,
+          useEventSourceWithName = CLUSTER_MANAGED_GERRIT_NETWORK_EVENT_SOURCE),
+    })
+public class GerritClusterReconciler
+    implements Reconciler<GerritCluster>, EventSourceInitializer<GerritCluster> {
+  public static final String CM_EVENT_SOURCE = "cm-event-source";
+  public static final String PVC_EVENT_SOURCE = "pvc-event-source";
+  public static final String CLUSTER_MANAGED_GERRIT_EVENT_SOURCE = "cluster-managed-gerrit";
+  public static final String CLUSTER_MANAGED_RECEIVER_EVENT_SOURCE = "cluster-managed-receiver";
+  public static final String CLUSTER_MANAGED_GERRIT_NETWORK_EVENT_SOURCE =
+      "cluster-managed-gerrit-network";
+
+  @Override
+  public Map<String, EventSource> prepareEventSources(EventSourceContext<GerritCluster> context) {
+    InformerEventSource<ConfigMap, GerritCluster> cmEventSource =
+        eventSource(ConfigMap.class, context);
+    InformerEventSource<PersistentVolumeClaim, GerritCluster> pvcEventSource =
+        eventSource(PersistentVolumeClaim.class, context);
+    InformerEventSource<Gerrit, GerritCluster> clusterManagedGerritEventSource =
+        eventSource(Gerrit.class, context);
+    InformerEventSource<Receiver, GerritCluster> clusterManagedReceiverEventSource =
+        eventSource(Receiver.class, context);
+    InformerEventSource<GerritNetwork, GerritCluster> clusterManagedGerritNetworkEventSource =
+        eventSource(GerritNetwork.class, context);
+
+    Map<String, EventSource> eventSources = new HashMap<>();
+    eventSources.put(CM_EVENT_SOURCE, cmEventSource);
+    eventSources.put(PVC_EVENT_SOURCE, pvcEventSource);
+    eventSources.put(CLUSTER_MANAGED_GERRIT_EVENT_SOURCE, clusterManagedGerritEventSource);
+    eventSources.put(CLUSTER_MANAGED_RECEIVER_EVENT_SOURCE, clusterManagedReceiverEventSource);
+    eventSources.put(
+        CLUSTER_MANAGED_GERRIT_NETWORK_EVENT_SOURCE, clusterManagedGerritNetworkEventSource);
+    return eventSources;
+  }
+
+  /** Builds an informer-backed event source for the given resource type. */
+  private <R extends HasMetadata> InformerEventSource<R, GerritCluster> eventSource(
+      Class<R> resourceType, EventSourceContext<GerritCluster> context) {
+    return new InformerEventSource<>(
+        InformerConfiguration.from(resourceType, context).build(), context);
+  }
+
+  /** Collects the managed Gerrit and Receiver names and patches them into the status. */
+  @Override
+  public UpdateControl<GerritCluster> reconcile(
+      GerritCluster gerritCluster, Context<GerritCluster> context) {
+    List<GerritTemplate> managedGerrits = gerritCluster.getSpec().getGerrits();
+    Map<String, List<String>> members = new HashMap<>();
+    members.put(
+        "gerrit",
+        managedGerrits.stream().map(g -> g.getMetadata().getName()).collect(Collectors.toList()));
+    ReceiverTemplate managedReceiver = gerritCluster.getSpec().getReceiver();
+    if (managedReceiver != null) {
+      members.put("receiver", List.of(managedReceiver.getMetadata().getName()));
+    }
+    return UpdateControl.patchStatus(updateStatus(gerritCluster, members));
+  }
+
+  /** Sets the given members map on the cluster status, creating the status object if absent. */
+  private GerritCluster updateStatus(
+      GerritCluster gerritCluster, Map<String, List<String>> members) {
+    GerritClusterStatus status = gerritCluster.getStatus();
+    if (status == null) {
+      status = new GerritClusterStatus();
+    }
+    status.setMembers(members);
+    gerritCluster.setStatus(status);
+    return gerritCluster;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerrit.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerrit.java
new file mode 100644
index 0000000..8de0b3e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerrit.java
@@ -0,0 +1,68 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Deleter;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.GarbageCollected;
+import io.javaoperatorsdk.operator.processing.dependent.BulkDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.Creator;
+import io.javaoperatorsdk.operator.processing.dependent.Updater;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependentResource;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Bulk dependent resource managing one {@link Gerrit} per {@link GerritTemplate} in the
+ * GerritCluster spec, so that the set of managed Gerrit instances follows the template list.
+ */
+public class ClusterManagedGerrit extends KubernetesDependentResource<Gerrit, GerritCluster>
+    implements Creator<Gerrit, GerritCluster>,
+        Updater<Gerrit, GerritCluster>,
+        Deleter<GerritCluster>,
+        BulkDependentResource<Gerrit, GerritCluster>,
+        GarbageCollected<GerritCluster> {
+
+  public ClusterManagedGerrit() {
+    super(Gerrit.class);
+  }
+
+  // Renders each configured template into the Gerrit it should produce, keyed by name.
+  @Override
+  public Map<String, Gerrit> desiredResources(
+      GerritCluster gerritCluster, Context<GerritCluster> context) {
+    Map<String, Gerrit> desiredGerrits = new HashMap<>();
+    for (GerritTemplate template : gerritCluster.getSpec().getGerrits()) {
+      desiredGerrits.put(template.getMetadata().getName(), template.toGerrit(gerritCluster));
+    }
+    return desiredGerrits;
+  }
+
+  // Indexes the Gerrit instances currently known to the event source by name.
+  @Override
+  public Map<String, Gerrit> getSecondaryResources(
+      GerritCluster primary, Context<GerritCluster> context) {
+    Set<Gerrit> liveGerrits = context.getSecondaryResources(Gerrit.class);
+    Map<String, Gerrit> byName = new HashMap<>(liveGerrits.size());
+    for (Gerrit gerrit : liveGerrits) {
+      byName.put(gerrit.getMetadata().getName(), gerrit);
+    }
+    return byName;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritCondition.java
new file mode 100644
index 0000000..7455aca
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritCondition.java
@@ -0,0 +1,37 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+/**
+ * Reconcile precondition for the cluster-managed Gerrit dependent: met when the GerritCluster
+ * spec lists at least one Gerrit template.
+ */
+public class ClusterManagedGerritCondition implements Condition<Gerrit, GerritCluster> {
+
+  @Override
+  public boolean isMet(
+      DependentResource<Gerrit, GerritCluster> dependentResource,
+      GerritCluster gerritCluster,
+      Context<GerritCluster> context) {
+    // Met as soon as the cluster declares any Gerrit instance to manage.
+    return !gerritCluster.getSpec().getGerrits().isEmpty();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritNetwork.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritNetwork.java
new file mode 100644
index 0000000..232c791
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritNetwork.java
@@ -0,0 +1,86 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetworkSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.NetworkMember;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.NetworkMemberWithSsh;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplate;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.Optional;
+
+/**
+ * Dependent resource creating the GerritNetwork for a GerritCluster from the Gerrit and Receiver
+ * members declared in the cluster spec.
+ */
+@KubernetesDependent
+public class ClusterManagedGerritNetwork
+    extends CRUDKubernetesDependentResource<GerritNetwork, GerritCluster> {
+  public static final String NAME_SUFFIX = "gerrit-network";
+
+  public ClusterManagedGerritNetwork() {
+    super(GerritNetwork.class);
+  }
+
+  @Override
+  public GerritNetwork desired(GerritCluster gerritCluster, Context<GerritCluster> context) {
+    GerritNetwork gerritNetwork = new GerritNetwork();
+    gerritNetwork.setMetadata(
+        new ObjectMetaBuilder()
+            .withName(gerritCluster.getDependentResourceName(NAME_SUFFIX))
+            .withNamespace(gerritCluster.getMetadata().getNamespace())
+            .build());
+    GerritNetworkSpec gerritNetworkSpec = new GerritNetworkSpec();
+
+    // Wire up at most one primary and one replica Gerrit; the first match in the list wins.
+    findGerritByMode(gerritCluster, GerritMode.PRIMARY)
+        .ifPresent(
+            primary ->
+                gerritNetworkSpec.setPrimaryGerrit(
+                    new NetworkMemberWithSsh(
+                        primary.getMetadata().getName(), primary.getSpec().getService())));
+
+    findGerritByMode(gerritCluster, GerritMode.REPLICA)
+        .ifPresent(
+            replica ->
+                gerritNetworkSpec.setGerritReplica(
+                    new NetworkMemberWithSsh(
+                        replica.getMetadata().getName(), replica.getSpec().getService())));
+
+    ReceiverTemplate receiver = gerritCluster.getSpec().getReceiver();
+    if (receiver != null) {
+      gerritNetworkSpec.setReceiver(
+          new NetworkMember(receiver.getMetadata().getName(), receiver.getSpec().getService()));
+    }
+    gerritNetworkSpec.setIngress(gerritCluster.getSpec().getIngress());
+    gerritNetwork.setSpec(gerritNetworkSpec);
+    return gerritNetwork;
+  }
+
+  /** Returns the first Gerrit template in the cluster spec running in the given mode. */
+  private static Optional<GerritTemplate> findGerritByMode(
+      GerritCluster gerritCluster, GerritMode mode) {
+    return gerritCluster.getSpec().getGerrits().stream()
+        .filter(g -> g.getSpec().getMode() == mode)
+        .findFirst();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritNetworkCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritNetworkCondition.java
new file mode 100644
index 0000000..a5b9244
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedGerritNetworkCondition.java
@@ -0,0 +1,38 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+/**
+ * Reconcile precondition for the cluster-managed GerritNetwork dependent: met when ingress is
+ * enabled in the GerritCluster spec.
+ */
+public class ClusterManagedGerritNetworkCondition
+    implements Condition<GerritNetwork, GerritCluster> {
+
+  @Override
+  public boolean isMet(
+      DependentResource<GerritNetwork, GerritCluster> dependentResource,
+      GerritCluster gerritCluster,
+      Context<GerritCluster> context) {
+    // Only create a GerritNetwork when the cluster has ingress enabled.
+    return gerritCluster.getSpec().getIngress().isEnabled();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedReceiver.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedReceiver.java
new file mode 100644
index 0000000..62618a2
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedReceiver.java
@@ -0,0 +1,38 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+
+/**
+ * Dependent resource managing the Receiver declared in the GerritCluster spec. Only reconciled
+ * when a receiver template is present (see ClusterManagedReceiverCondition).
+ */
+public class ClusterManagedReceiver
+    extends CRUDKubernetesDependentResource<Receiver, GerritCluster> {
+
+  public ClusterManagedReceiver() {
+    super(Receiver.class);
+  }
+
+  @Override
+  public Receiver desired(GerritCluster gerritCluster, Context<GerritCluster> context) {
+    // Renders the cluster's receiver template into a standalone Receiver resource.
+    return gerritCluster.getSpec().getReceiver().toReceiver(gerritCluster);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedReceiverCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedReceiverCondition.java
new file mode 100644
index 0000000..aa27446
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/ClusterManagedReceiverCondition.java
@@ -0,0 +1,32 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+public class ClusterManagedReceiverCondition implements Condition<Receiver, GerritCluster> {
+
+ @Override
+ public boolean isMet(
+ DependentResource<Receiver, GerritCluster> dependentResource,
+ GerritCluster gerritCluster,
+ Context<GerritCluster> context) {
+ return gerritCluster.getSpec().getReceiver() != null;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsIdmapdConfigMap.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsIdmapdConfigMap.java
new file mode 100644
index 0000000..623e801
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsIdmapdConfigMap.java
@@ -0,0 +1,55 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.Map;
+
+/**
+ * ConfigMap holding the idmapd.conf content taken from the NFS workaround section of the
+ * GerritCluster storage spec.
+ */
+@KubernetesDependent(resourceDiscriminator = NfsIdmapdConfigMapDiscriminator.class)
+public class NfsIdmapdConfigMap extends CRUDKubernetesDependentResource<ConfigMap, GerritCluster> {
+  public static final String NFS_IDMAPD_CM_NAME = "nfs-idmapd-config";
+
+  public NfsIdmapdConfigMap() {
+    super(ConfigMap.class);
+  }
+
+  @Override
+  protected ConfigMap desired(GerritCluster gerritCluster, Context<GerritCluster> context) {
+    String idmapdConfig =
+        gerritCluster
+            .getSpec()
+            .getStorage()
+            .getStorageClasses()
+            .getNfsWorkaround()
+            .getIdmapdConfig();
+    return new ConfigMapBuilder()
+        .withNewMetadata()
+        .withName(NFS_IDMAPD_CM_NAME)
+        .withNamespace(gerritCluster.getMetadata().getNamespace())
+        .withLabels(gerritCluster.getLabels(NFS_IDMAPD_CM_NAME, this.getClass().getSimpleName()))
+        .endMetadata()
+        .withData(Map.of("idmapd.conf", idmapdConfig))
+        .build();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsIdmapdConfigMapDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsIdmapdConfigMapDiscriminator.java
new file mode 100644
index 0000000..17dfea7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsIdmapdConfigMapDiscriminator.java
@@ -0,0 +1,48 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import static com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler.CM_EVENT_SOURCE;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+/**
+ * Selects the idmapd ConfigMap for a GerritCluster from the shared ConfigMap event source by its
+ * well-known name and the cluster's namespace.
+ */
+public class NfsIdmapdConfigMapDiscriminator
+    implements ResourceDiscriminator<ConfigMap, GerritCluster> {
+  @Override
+  public Optional<ConfigMap> distinguish(
+      Class<ConfigMap> resource, GerritCluster primary, Context<GerritCluster> context) {
+    // Cast matches the event source registered under CM_EVENT_SOURCE in GerritClusterReconciler.
+    @SuppressWarnings("unchecked")
+    InformerEventSource<ConfigMap, GerritCluster> ies =
+        (InformerEventSource<ConfigMap, GerritCluster>)
+            context
+                .eventSourceRetriever()
+                .getResourceEventSourceFor(ConfigMap.class, CM_EVENT_SOURCE);
+
+    ResourceID configMapId =
+        new ResourceID(NfsIdmapdConfigMap.NFS_IDMAPD_CM_NAME, primary.getMetadata().getNamespace());
+    return ies.get(configMapId);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsWorkaroundCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsWorkaroundCondition.java
new file mode 100644
index 0000000..a0cccc0
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/NfsWorkaroundCondition.java
@@ -0,0 +1,38 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.NfsWorkaroundConfig;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+/**
+ * Reconcile precondition for {@link NfsIdmapdConfigMap}: met when the NFS workaround is enabled
+ * and an idmapd configuration is provided in the GerritCluster storage spec.
+ */
+public class NfsWorkaroundCondition implements Condition<ConfigMap, GerritCluster> {
+  @Override
+  public boolean isMet(
+      DependentResource<ConfigMap, GerritCluster> dependentResource,
+      GerritCluster gerritCluster,
+      Context<GerritCluster> context) {
+    NfsWorkaroundConfig nfsConfig =
+        gerritCluster.getSpec().getStorage().getStorageClasses().getNfsWorkaround();
+    return nfsConfig.isEnabled() && nfsConfig.getIdmapdConfig() != null;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/SharedPVC.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/SharedPVC.java
new file mode 100644
index 0000000..099afc6
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/SharedPVC.java
@@ -0,0 +1,54 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import com.google.gerrit.k8s.operator.util.CRUDKubernetesDependentPVCResource;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritStorageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.SharedStorage;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.Map;
+
+/**
+ * Dependent resource managing the shared ReadWriteMany PersistentVolumeClaim of a
+ * {@code GerritCluster}.
+ *
+ * <p>The PVC is created in the cluster's namespace under the fixed name
+ * {@link #SHARED_PVC_NAME}, sized and (optionally) pinned to a volume/selector taken from
+ * {@code spec.storage.sharedStorage}.
+ */
+@KubernetesDependent(resourceDiscriminator = SharedPVCDiscriminator.class)
+public class SharedPVC extends CRUDKubernetesDependentPVCResource<GerritCluster> {
+
+ // Fixed PVC name; also used by SharedPVCDiscriminator to look the resource up.
+ public static final String SHARED_PVC_NAME = "shared-pvc";
+
+ @Override
+ protected PersistentVolumeClaim desiredPVC(
+ GerritCluster gerritCluster, Context<GerritCluster> context) {
+ GerritStorageConfig storageConfig = gerritCluster.getSpec().getStorage();
+ SharedStorage sharedStorage = storageConfig.getSharedStorage();
+ return new PersistentVolumeClaimBuilder()
+ .withNewMetadata()
+ .withName(SHARED_PVC_NAME)
+ .withNamespace(gerritCluster.getMetadata().getNamespace())
+ .withLabels(gerritCluster.getLabels("shared-storage", this.getClass().getSimpleName()))
+ .endMetadata()
+ .withNewSpec()
+ // Shared storage must be mountable by multiple pods simultaneously.
+ .withAccessModes("ReadWriteMany")
+ .withNewResources()
+ .withRequests(Map.of("storage", sharedStorage.getSize()))
+ .endResources()
+ .withStorageClassName(storageConfig.getStorageClasses().getReadWriteMany())
+ // Selector/volumeName may be null in the spec; the builder simply omits them then —
+ // NOTE(review): confirm fabric8 leaves null fields unset rather than serializing them.
+ .withSelector(sharedStorage.getSelector())
+ .withVolumeName(sharedStorage.getVolumeName())
+ .endSpec()
+ .build();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/SharedPVCDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/SharedPVCDiscriminator.java
new file mode 100644
index 0000000..52fe941
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/cluster/dependent/SharedPVCDiscriminator.java
@@ -0,0 +1,42 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster.dependent;
+
+import static com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler.PVC_EVENT_SOURCE;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+/**
+ * Discriminator selecting the shared PVC of a {@code GerritCluster} from the shared
+ * PVC informer event source.
+ *
+ * <p>Needed because several PVC dependents share one event source ({@code PVC_EVENT_SOURCE});
+ * this class picks out the PVC with the well-known name {@link SharedPVC#SHARED_PVC_NAME}
+ * in the primary's namespace.
+ */
+public class SharedPVCDiscriminator
+ implements ResourceDiscriminator<PersistentVolumeClaim, GerritCluster> {
+ @Override
+ public Optional<PersistentVolumeClaim> distinguish(
+ Class<PersistentVolumeClaim> resource,
+ GerritCluster primary,
+ Context<GerritCluster> context) {
+ // Look the PVC up in the informer cache of the named shared event source (no API call).
+ InformerEventSource<PersistentVolumeClaim, GerritCluster> ies =
+ (InformerEventSource<PersistentVolumeClaim, GerritCluster>)
+ context
+ .eventSourceRetriever()
+ .getResourceEventSourceFor(PersistentVolumeClaim.class, PVC_EVENT_SOURCE);
+
+ return ies.get(new ResourceID(SharedPVC.SHARED_PVC_NAME, primary.getMetadata().getNamespace()));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/GerritReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/GerritReconciler.java
new file mode 100644
index 0000000..04d0a2f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/GerritReconciler.java
@@ -0,0 +1,146 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit;
+
+import static com.google.gerrit.k8s.operator.gerrit.GerritReconciler.CONFIG_MAP_EVENT_SOURCE;
+import static com.google.gerrit.k8s.operator.gerrit.dependent.GerritSecret.CONTEXT_SECRET_VERSION_KEY;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritConfigMap;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritInitConfigMap;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritSecret;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritService;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritStatefulSet;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritStatus;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.WorkflowReconcileResult;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * Reconciler for the {@code Gerrit} custom resource.
+ *
+ * <p>Declares the managed dependent-resource workflow (secret, two ConfigMaps sharing one
+ * informer event source, then the StatefulSet, then the Service) and, on each reconcile,
+ * patches the resource status with readiness plus the currently applied ConfigMap and
+ * Secret resource versions.
+ */
+@Singleton
+@ControllerConfiguration(
+ dependents = {
+ @Dependent(name = "gerrit-secret", type = GerritSecret.class),
+ @Dependent(
+ name = "gerrit-configmap",
+ type = GerritConfigMap.class,
+ useEventSourceWithName = CONFIG_MAP_EVENT_SOURCE),
+ @Dependent(
+ name = "gerrit-init-configmap",
+ type = GerritInitConfigMap.class,
+ useEventSourceWithName = CONFIG_MAP_EVENT_SOURCE),
+ @Dependent(
+ name = "gerrit-statefulset",
+ type = GerritStatefulSet.class,
+ dependsOn = {"gerrit-configmap", "gerrit-init-configmap"}),
+ @Dependent(
+ name = "gerrit-service",
+ type = GerritService.class,
+ dependsOn = {"gerrit-statefulset"})
+ })
+public class GerritReconciler implements Reconciler<Gerrit>, EventSourceInitializer<Gerrit> {
+ // Name of the ConfigMap informer event source shared by both ConfigMap dependents above.
+ public static final String CONFIG_MAP_EVENT_SOURCE = "configmap-event-source";
+
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ private final KubernetesClient client;
+
+ @Inject
+ public GerritReconciler(KubernetesClient client) {
+ this.client = client;
+ }
+
+ /**
+ * Registers the single ConfigMap informer that both ConfigMap dependents reuse via
+ * {@code useEventSourceWithName}.
+ */
+ @Override
+ public Map<String, EventSource> prepareEventSources(EventSourceContext<Gerrit> context) {
+ InformerEventSource<ConfigMap, Gerrit> configmapEventSource =
+ new InformerEventSource<>(
+ InformerConfiguration.from(ConfigMap.class, context).build(), context);
+
+ Map<String, EventSource> eventSources = new HashMap<>();
+ eventSources.put(CONFIG_MAP_EVENT_SOURCE, configmapEventSource);
+ return eventSources;
+ }
+
+ /**
+ * Reconciles a Gerrit resource. Dependent resources are handled by the managed workflow;
+ * this method only refreshes and patches the status subresource.
+ */
+ @Override
+ public UpdateControl<Gerrit> reconcile(Gerrit gerrit, Context<Gerrit> context) throws Exception {
+ return UpdateControl.patchStatus(updateStatus(gerrit, context));
+ }
+
+ /**
+ * Rebuilds the Gerrit status: readiness from the workflow result, plus the resource
+ * versions of the applied ConfigMaps (read live from the API server) and of the
+ * referenced Secret (taken from the reconcile context, if the dependent recorded it).
+ */
+ private Gerrit updateStatus(Gerrit gerrit, Context<Gerrit> context) {
+ GerritStatus status = gerrit.getStatus();
+ if (status == null) {
+ status = new GerritStatus();
+ }
+ Optional<WorkflowReconcileResult> result =
+ context.managedDependentResourceContext().getWorkflowReconcileResult();
+ if (result.isPresent()) {
+ status.setReady(result.get().allDependentResourcesReady());
+ } else {
+ // No workflow result (e.g. nothing reconciled yet): report not ready.
+ status.setReady(false);
+ }
+
+ Map<String, String> cmVersions = new HashMap<>();
+
+ // NOTE(review): .get() below returns null if the ConfigMap does not exist yet, which
+ // would NPE on getMetadata(); presumably the workflow ordering guarantees existence
+ // at this point — confirm.
+ cmVersions.put(
+ GerritConfigMap.getName(gerrit),
+ client
+ .configMaps()
+ .inNamespace(gerrit.getMetadata().getNamespace())
+ .withName(GerritConfigMap.getName(gerrit))
+ .get()
+ .getMetadata()
+ .getResourceVersion());
+
+ cmVersions.put(
+ GerritInitConfigMap.getName(gerrit),
+ client
+ .configMaps()
+ .inNamespace(gerrit.getMetadata().getNamespace())
+ .withName(GerritInitConfigMap.getName(gerrit))
+ .get()
+ .getMetadata()
+ .getResourceVersion());
+
+ logger.atFine().log("Adding ConfigMap versions: %s", cmVersions);
+ status.setAppliedConfigMapVersions(cmVersions);
+
+ // Secret version is propagated by GerritSecret via the shared reconcile context;
+ // if absent, the applied-secret map is left empty rather than failing.
+ Map<String, String> secretVersions = new HashMap<>();
+ Optional<String> gerritSecret =
+ context.managedDependentResourceContext().get(CONTEXT_SECRET_VERSION_KEY, String.class);
+ if (gerritSecret.isPresent()) {
+ secretVersions.put(gerrit.getSpec().getSecretRef(), gerritSecret.get());
+ }
+ logger.atFine().log("Adding Secret versions: %s", secretVersions);
+ status.setAppliedSecretVersions(secretVersions);
+
+ gerrit.setStatus(status);
+ return gerrit;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/ConfigBuilder.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/ConfigBuilder.java
new file mode 100644
index 0000000..ca58e94
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/ConfigBuilder.java
@@ -0,0 +1,91 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.config;
+
+import com.google.common.collect.ImmutableList;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import org.eclipse.jgit.errors.ConfigInvalidException;
+import org.eclipse.jgit.lib.Config;
+
+/**
+ * Base class for builders that produce a JGit {@link Config} from a user-supplied base
+ * config plus a list of operator-enforced {@link RequiredOption}s.
+ *
+ * <p>{@link #build()} first validates that no required option is set to a conflicting
+ * value, then forcibly writes every required option into the config.
+ */
+public abstract class ConfigBuilder {
+
+ private final ImmutableList<RequiredOption<?>> requiredOptions;
+ private final Config config;
+
+ // Package-private: accepts an already-parsed base config.
+ ConfigBuilder(Config baseConfig, ImmutableList<RequiredOption<?>> requiredOptions) {
+ this.config = baseConfig;
+ this.requiredOptions = requiredOptions;
+ }
+
+ // NOTE(review): calls the overridable parseConfig() from a constructor — safe as long as
+ // no subclass overrides it to depend on subclass state; confirm.
+ protected ConfigBuilder(String baseConfig, ImmutableList<RequiredOption<?>> requiredOptions) {
+ this.config = parseConfig(baseConfig);
+ this.requiredOptions = requiredOptions;
+ }
+
+ /**
+ * Validates the base config against the required options, then applies them.
+ *
+ * @return the config with all required options set
+ * @throws IllegalStateException if the base config conflicts with a required option
+ */
+ public Config build() {
+ ConfigValidator configValidator = new ConfigValidator(requiredOptions);
+ try {
+ configValidator.check(config);
+ } catch (InvalidGerritConfigException e) {
+ // Surface config conflicts as unchecked failures; callers treat this as fatal.
+ throw new IllegalStateException(e);
+ }
+ setRequiredOptions();
+ return config;
+ }
+
+ /** Validation only — does not mutate the config. */
+ public void validate() throws InvalidGerritConfigException {
+ new ConfigValidator(requiredOptions).check(config);
+ }
+
+ public List<RequiredOption<?>> getRequiredOptions() {
+ return this.requiredOptions;
+ }
+
+ /**
+ * Parses git-config text into a {@link Config}.
+ *
+ * @throws IllegalStateException if the text is not valid git-config syntax
+ */
+ protected Config parseConfig(String text) {
+ Config cfg = new Config();
+ try {
+ cfg.fromText(text);
+ } catch (ConfigInvalidException e) {
+ throw new IllegalStateException("Invalid configuration: " + text, e);
+ }
+ return cfg;
+ }
+
+ // Writes every required option into the config. Set-valued options are merged into the
+ // existing multi-value list (missing expected entries appended, existing ones kept once).
+ @SuppressWarnings("unchecked")
+ private void setRequiredOptions() {
+ for (RequiredOption<?> opt : requiredOptions) {
+ if (opt.getExpected() instanceof String) {
+ config.setString(
+ opt.getSection(), opt.getSubSection(), opt.getKey(), (String) opt.getExpected());
+ } else if (opt.getExpected() instanceof Boolean) {
+ config.setBoolean(
+ opt.getSection(), opt.getSubSection(), opt.getKey(), (Boolean) opt.getExpected());
+ } else if (opt.getExpected() instanceof Set) {
+ List<String> values =
+ new ArrayList<String>(
+ Arrays.asList(
+ config.getStringList(opt.getSection(), opt.getSubSection(), opt.getKey())));
+ // Append only the expected entries that are not already present (no duplicates).
+ List<String> expectedSet = new ArrayList<String>();
+ expectedSet.addAll((Set<String>) opt.getExpected());
+ expectedSet.removeAll(values);
+ values.addAll(expectedSet);
+ config.setStringList(opt.getSection(), opt.getSubSection(), opt.getKey(), values);
+ }
+ }
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/ConfigValidator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/ConfigValidator.java
new file mode 100644
index 0000000..bc952a1
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/ConfigValidator.java
@@ -0,0 +1,56 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.config;
+
+import java.util.List;
+import java.util.Set;
+import org.eclipse.jgit.lib.Config;
+
+/**
+ * Checks a JGit {@link Config} against a list of {@link RequiredOption}s.
+ *
+ * <p>An option that is absent from the config passes (it will be set later by
+ * {@code ConfigBuilder}); an option that is present must match the expected value's
+ * string form. Set-valued options are never rejected here — they are merged, not
+ * overwritten, by the builder.
+ */
+public class ConfigValidator {
+ private final List<RequiredOption<?>> requiredOptions;
+
+ public ConfigValidator(List<RequiredOption<?>> requiredOptions) {
+ this.requiredOptions = requiredOptions;
+ }
+
+ /**
+ * Validates every required option.
+ *
+ * @throws InvalidGerritConfigException on the first option set to a conflicting value
+ */
+ public void check(Config cfg) throws InvalidGerritConfigException {
+ for (RequiredOption<?> opt : requiredOptions) {
+ checkOption(cfg, opt);
+ }
+ }
+
+ private void checkOption(Config cfg, RequiredOption<?> opt) throws InvalidGerritConfigException {
+ if (!optionExists(cfg, opt)) {
+ // Absent options are fine; the builder will add them.
+ return;
+ }
+ if (opt.getExpected() instanceof Set) {
+ // Set-valued options are additive — any existing values are acceptable.
+ return;
+ } else {
+ String value = cfg.getString(opt.getSection(), opt.getSubSection(), opt.getKey());
+ if (isExpectedValue(value, opt)) {
+ return;
+ }
+ throw new InvalidGerritConfigException(value, opt);
+ }
+ }
+
+ private boolean optionExists(Config cfg, RequiredOption<?> opt) {
+ return cfg.getNames(opt.getSection(), opt.getSubSection()).contains(opt.getKey());
+ }
+
+ // String comparison against expected.toString() — NOTE(review): case-sensitive, so e.g.
+ // a config value "TRUE" would not match Boolean.TRUE ("true"); confirm this is intended.
+ private boolean isExpectedValue(String value, RequiredOption<?> opt) {
+ return value.equals(opt.getExpected().toString());
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/InvalidGerritConfigException.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/InvalidGerritConfigException.java
new file mode 100644
index 0000000..6ab14bd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/InvalidGerritConfigException.java
@@ -0,0 +1,26 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.config;
+
+/**
+ * Thrown when a user-supplied Gerrit config sets an operator-required option to a value
+ * other than the one the operator enforces.
+ */
+public class InvalidGerritConfigException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param value the offending value found in the config
+ * @param opt the required option (section/subsection/key and expected value)
+ */
+ public InvalidGerritConfigException(String value, RequiredOption<?> opt) {
+ super(
+ String.format(
+ "Option %s.%s.%s set to unsupported value %s. Expected %s.",
+ opt.getSection(), opt.getSubSection(), opt.getKey(), value, opt.getExpected()));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/RequiredOption.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/RequiredOption.java
new file mode 100644
index 0000000..9f4d22c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/config/RequiredOption.java
@@ -0,0 +1,49 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.config;
+
+/**
+ * Immutable description of a git-config option the operator enforces: a
+ * section/subsection/key address plus the expected value.
+ *
+ * @param <T> expected-value type; ConfigBuilder handles {@code String}, {@code Boolean}
+ *     and {@code Set} (multi-value options)
+ */
+public class RequiredOption<T> {
+ private final String section;
+ // May be null for options without a subsection (see the two-arg-address constructor).
+ private final String subSection;
+ private final String key;
+ private final T expected;
+
+ public RequiredOption(String section, String subSection, String key, T expected) {
+ this.section = section;
+ this.subSection = subSection;
+ this.key = key;
+ this.expected = expected;
+ }
+
+ /** Convenience constructor for options without a subsection. */
+ public RequiredOption(String section, String key, T expected) {
+ this(section, null, key, expected);
+ }
+
+ public String getSection() {
+ return section;
+ }
+
+ public String getSubSection() {
+ return subSection;
+ }
+
+ public String getKey() {
+ return key;
+ }
+
+ public T getExpected() {
+ return expected;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritConfigMap.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritConfigMap.java
new file mode 100644
index 0000000..22001b9
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritConfigMap.java
@@ -0,0 +1,91 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.gerrit.config.GerritConfigBuilder;
+import com.google.gerrit.k8s.operator.v1alpha.gerrit.config.HighAvailabilityPluginConfigBuilder;
+import com.google.gerrit.k8s.operator.v1alpha.gerrit.config.SpannerRefDbPluginConfigBuilder;
+import com.google.gerrit.k8s.operator.v1alpha.gerrit.config.ZookeeperRefDbPluginConfigBuilder;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.Map;
+
+/**
+ * Dependent resource building the main Gerrit ConfigMap: gerrit.config plus, depending on
+ * the spec, high-availability and ref-db plugin configs and a default healthcheck config.
+ */
+@KubernetesDependent(resourceDiscriminator = GerritConfigMapDiscriminator.class)
+public class GerritConfigMap extends CRUDKubernetesDependentResource<ConfigMap, Gerrit> {
+ private static final String DEFAULT_HEALTHCHECK_CONFIG =
+ "[healthcheck \"auth\"]\nenabled = false\n[healthcheck \"querychanges\"]\nenabled = false";
+
+ public GerritConfigMap() {
+ super(ConfigMap.class);
+ }
+
+ @Override
+ protected ConfigMap desired(Gerrit gerrit, Context<Gerrit> context) {
+ Map<String, String> gerritLabels =
+ GerritCluster.getLabels(
+ gerrit.getMetadata().getName(), getName(gerrit), this.getClass().getSimpleName());
+
+ // NOTE(review): this map comes straight from the spec and is mutated in place below —
+ // confirm getConfigFiles() returns a copy, otherwise the primary resource is modified.
+ Map<String, String> configFiles = gerrit.getSpec().getConfigFiles();
+
+ // NOTE(review): this default is immediately overwritten by the unconditional put below;
+ // the guard appears to be dead code.
+ if (!configFiles.containsKey("gerrit.config")) {
+ configFiles.put("gerrit.config", "");
+ }
+
+ // Always regenerate gerrit.config with the operator-enforced options applied.
+ configFiles.put("gerrit.config", new GerritConfigBuilder(gerrit).build().toText());
+
+ if (gerrit.getSpec().isHighlyAvailablePrimary()) {
+ configFiles.put(
+ "high-availability.config",
+ new HighAvailabilityPluginConfigBuilder(gerrit).build().toText());
+ }
+
+ // Emit the matching ref-db plugin config; other database values add nothing.
+ switch (gerrit.getSpec().getRefdb().getDatabase()) {
+ case ZOOKEEPER:
+ configFiles.put(
+ "zookeeper-refdb.config",
+ new ZookeeperRefDbPluginConfigBuilder(gerrit).build().toText());
+ break;
+ case SPANNER:
+ configFiles.put(
+ "spanner-refdb.config", new SpannerRefDbPluginConfigBuilder(gerrit).build().toText());
+ break;
+ default:
+ break;
+ }
+
+ // User-supplied healthcheck.config wins; otherwise disable the checks that require
+ // credentials/changes, per DEFAULT_HEALTHCHECK_CONFIG.
+ if (!configFiles.containsKey("healthcheck.config")) {
+ configFiles.put("healthcheck.config", DEFAULT_HEALTHCHECK_CONFIG);
+ }
+
+ return new ConfigMapBuilder()
+ .withApiVersion("v1")
+ .withNewMetadata()
+ .withName(getName(gerrit))
+ .withNamespace(gerrit.getMetadata().getNamespace())
+ .withLabels(gerritLabels)
+ .endMetadata()
+ .withData(configFiles)
+ .build();
+ }
+
+ /** Deterministic ConfigMap name: {@code <gerrit-name>-configmap}. */
+ public static String getName(Gerrit gerrit) {
+ return String.format("%s-configmap", gerrit.getMetadata().getName());
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritConfigMapDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritConfigMapDiscriminator.java
new file mode 100644
index 0000000..6ec6b77
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritConfigMapDiscriminator.java
@@ -0,0 +1,36 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+/**
+ * Discriminator selecting the main Gerrit ConfigMap among the ConfigMaps delivered by the
+ * shared informer event source, by its deterministic name and the primary's namespace.
+ */
+public class GerritConfigMapDiscriminator implements ResourceDiscriminator<ConfigMap, Gerrit> {
+ @Override
+ public Optional<ConfigMap> distinguish(
+ Class<ConfigMap> resource, Gerrit primary, Context<Gerrit> context) {
+ // Cache lookup on the (single) ConfigMap event source — no API-server round trip.
+ InformerEventSource<ConfigMap, Gerrit> ies =
+ (InformerEventSource<ConfigMap, Gerrit>)
+ context.eventSourceRetriever().getResourceEventSourceFor(ConfigMap.class);
+
+ return ies.get(
+ new ResourceID(GerritConfigMap.getName(primary), primary.getMetadata().getNamespace()));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritInitConfigMap.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritInitConfigMap.java
new file mode 100644
index 0000000..3b9b8b4
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritInitConfigMap.java
@@ -0,0 +1,94 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.dependent;
+
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster.PLUGIN_CACHE_MOUNT_PATH;
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig.RefDatabase.SPANNER;
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig.RefDatabase.ZOOKEEPER;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature;
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritInitConfig;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * Dependent resource building the ConfigMap that carries {@code gerrit-init.yaml}, the
+ * YAML config consumed by the Gerrit init container (plugins, libs, plugin cache, HA flag,
+ * ref-db choice).
+ */
+@KubernetesDependent(resourceDiscriminator = GerritInitConfigMapDiscriminator.class)
+public class GerritInitConfigMap extends CRUDKubernetesDependentResource<ConfigMap, Gerrit> {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ public GerritInitConfigMap() {
+ super(ConfigMap.class);
+ }
+
+ @Override
+ protected ConfigMap desired(Gerrit gerrit, Context<Gerrit> context) {
+ Map<String, String> gerritLabels =
+ GerritCluster.getLabels(
+ gerrit.getMetadata().getName(), getName(gerrit), this.getClass().getSimpleName());
+
+ return new ConfigMapBuilder()
+ .withApiVersion("v1")
+ .withNewMetadata()
+ .withName(getName(gerrit))
+ .withNamespace(gerrit.getMetadata().getNamespace())
+ .withLabels(gerritLabels)
+ .endMetadata()
+ .withData(Map.of("gerrit-init.yaml", getGerritInitConfig(gerrit)))
+ .build();
+ }
+
+ /**
+ * Serializes the init configuration derived from the Gerrit spec to YAML.
+ *
+ * @throws IllegalStateException if Jackson fails to serialize (should not happen for
+ *     this POJO)
+ */
+ private String getGerritInitConfig(Gerrit gerrit) {
+ GerritInitConfig config = new GerritInitConfig();
+ config.setPlugins(gerrit.getSpec().getPlugins());
+ config.setLibs(gerrit.getSpec().getLibs());
+ config.setPluginCacheEnabled(gerrit.getSpec().getStorage().getPluginCache().isEnabled());
+ config.setPluginCacheDir(PLUGIN_CACHE_MOUNT_PATH);
+ config.setHighlyAvailable(gerrit.getSpec().isHighlyAvailablePrimary());
+
+ // Map the enum to the lowercase string the init tooling expects; other values leave
+ // the refdb field unset.
+ switch (gerrit.getSpec().getRefdb().getDatabase()) {
+ case ZOOKEEPER:
+ config.setRefdb(ZOOKEEPER.toString().toLowerCase(Locale.US));
+ break;
+ case SPANNER:
+ config.setRefdb(SPANNER.toString().toLowerCase(Locale.US));
+ break;
+ default:
+ break;
+ }
+
+ // No "---" document-start marker: the consumer expects a bare YAML mapping.
+ ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory().disable(Feature.WRITE_DOC_START_MARKER));
+ try {
+ return mapper.writeValueAsString(config);
+ } catch (JsonProcessingException e) {
+ logger.atSevere().withCause(e).log("Could not serialize gerrit-init.config");
+ throw new IllegalStateException(e);
+ }
+ }
+
+ /** Deterministic ConfigMap name: {@code <gerrit-name>-init-configmap}. */
+ public static String getName(Gerrit gerrit) {
+ return String.format("%s-init-configmap", gerrit.getMetadata().getName());
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritInitConfigMapDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritInitConfigMapDiscriminator.java
new file mode 100644
index 0000000..5494f5a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritInitConfigMapDiscriminator.java
@@ -0,0 +1,36 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+/**
+ * Discriminator selecting the Gerrit init ConfigMap among the ConfigMaps delivered by the
+ * shared informer event source, by its deterministic name and the primary's namespace.
+ */
+public class GerritInitConfigMapDiscriminator implements ResourceDiscriminator<ConfigMap, Gerrit> {
+ @Override
+ public Optional<ConfigMap> distinguish(
+ Class<ConfigMap> resource, Gerrit primary, Context<Gerrit> context) {
+ // Cache lookup on the (single) ConfigMap event source — no API-server round trip.
+ InformerEventSource<ConfigMap, Gerrit> ies =
+ (InformerEventSource<ConfigMap, Gerrit>)
+ context.eventSourceRetriever().getResourceEventSourceFor(ConfigMap.class);
+
+ return ies.get(
+ new ResourceID(GerritInitConfigMap.getName(primary), primary.getMetadata().getNamespace()));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritSecret.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritSecret.java
new file mode 100644
index 0000000..ac65dcd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritSecret.java
@@ -0,0 +1,67 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import io.fabric8.kubernetes.api.model.Secret;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.ReconcileResult;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.SecondaryToPrimaryMapper;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+@KubernetesDependent
+public class GerritSecret extends KubernetesDependentResource<Secret, Gerrit>
+ implements SecondaryToPrimaryMapper<Secret> {
+
+ public static final String CONTEXT_SECRET_VERSION_KEY = "gerrit-secret-version";
+
+ public GerritSecret() {
+ super(Secret.class);
+ }
+
+ @Override
+ public Set<ResourceID> toPrimaryResourceIDs(Secret secret) {
+ return client
+ .resources(Gerrit.class)
+ .inNamespace(secret.getMetadata().getNamespace())
+ .list()
+ .getItems()
+ .stream()
+ .filter(g -> g.getSpec().getSecretRef().equals(secret.getMetadata().getName()))
+ .map(g -> ResourceID.fromResource(g))
+ .collect(Collectors.toSet());
+ }
+
+ @Override
+ protected ReconcileResult<Secret> reconcile(
+ Gerrit primary, Secret actualResource, Context<Gerrit> context) {
+ Secret sec =
+ client
+ .secrets()
+ .inNamespace(primary.getMetadata().getNamespace())
+ .withName(primary.getSpec().getSecretRef())
+ .get();
+ if (sec != null) {
+ context
+ .managedDependentResourceContext()
+ .put(CONTEXT_SECRET_VERSION_KEY, sec.getMetadata().getResourceVersion());
+ }
+ return ReconcileResult.noOperation(actualResource);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritService.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritService.java
new file mode 100644
index 0000000..888903c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritService.java
@@ -0,0 +1,108 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.dependent;
+
+import static com.google.gerrit.k8s.operator.gerrit.dependent.GerritStatefulSet.HTTP_PORT;
+import static com.google.gerrit.k8s.operator.gerrit.dependent.GerritStatefulSet.SSH_PORT;
+
+import com.google.gerrit.k8s.operator.gerrit.GerritReconciler;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.fabric8.kubernetes.api.model.ServicePort;
+import io.fabric8.kubernetes.api.model.ServicePortBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+@KubernetesDependent
+public class GerritService extends CRUDKubernetesDependentResource<Service, Gerrit> {
+ public static final String HTTP_PORT_NAME = "http";
+
+ public GerritService() {
+ super(Service.class);
+ }
+
+ @Override
+ protected Service desired(Gerrit gerrit, Context<Gerrit> context) {
+ return new ServiceBuilder()
+ .withApiVersion("v1")
+ .withNewMetadata()
+ .withName(getName(gerrit))
+ .withNamespace(gerrit.getMetadata().getNamespace())
+ .withLabels(getLabels(gerrit))
+ .endMetadata()
+ .withNewSpec()
+ .withType(gerrit.getSpec().getService().getType())
+ .withPorts(getServicePorts(gerrit))
+ .withSelector(GerritStatefulSet.getSelectorLabels(gerrit))
+ .endSpec()
+ .build();
+ }
+
+ public static String getName(Gerrit gerrit) {
+ return gerrit.getMetadata().getName();
+ }
+
+ public static String getName(String gerritName) {
+ return gerritName;
+ }
+
+ public static String getName(GerritTemplate gerrit) {
+ return gerrit.getMetadata().getName();
+ }
+
+ public static String getHostname(Gerrit gerrit) {
+ return getHostname(gerrit.getMetadata().getName(), gerrit.getMetadata().getNamespace());
+ }
+
+ public static String getHostname(String name, String namespace) {
+ return String.format("%s.%s.svc.cluster.local", name, namespace);
+ }
+
+ public static String getUrl(Gerrit gerrit) {
+ return String.format(
+ "http://%s:%s", getHostname(gerrit), gerrit.getSpec().getService().getHttpPort());
+ }
+
+ public static Map<String, String> getLabels(Gerrit gerrit) {
+ return GerritCluster.getLabels(
+ gerrit.getMetadata().getName(), "gerrit-service", GerritReconciler.class.getSimpleName());
+ }
+
+ private static List<ServicePort> getServicePorts(Gerrit gerrit) {
+ List<ServicePort> ports = new ArrayList<>();
+ ports.add(
+ new ServicePortBuilder()
+ .withName(HTTP_PORT_NAME)
+ .withPort(gerrit.getSpec().getService().getHttpPort())
+ .withNewTargetPort(HTTP_PORT)
+ .build());
+ if (gerrit.isSshEnabled()) {
+ ports.add(
+ new ServicePortBuilder()
+ .withName("ssh")
+ .withPort(gerrit.getSpec().getService().getSshPort())
+ .withNewTargetPort(SSH_PORT)
+ .build());
+ }
+ return ports;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritStatefulSet.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritStatefulSet.java
new file mode 100644
index 0000000..d319f4c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gerrit/dependent/GerritStatefulSet.java
@@ -0,0 +1,364 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.dependent;
+
+import static com.google.gerrit.k8s.operator.gerrit.dependent.GerritSecret.CONTEXT_SECRET_VERSION_KEY;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.gerrit.GerritReconciler;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.ContainerImageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.NfsWorkaroundConfig;
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.ContainerPort;
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.EnvVarBuilder;
+import io.fabric8.kubernetes.api.model.Volume;
+import io.fabric8.kubernetes.api.model.VolumeBuilder;
+import io.fabric8.kubernetes.api.model.VolumeMount;
+import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
+import io.fabric8.kubernetes.api.model.apps.StatefulSet;
+import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.sql.Timestamp;
+import java.text.SimpleDateFormat;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+
+@KubernetesDependent
+public class GerritStatefulSet extends CRUDKubernetesDependentResource<StatefulSet, Gerrit> {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ private static final SimpleDateFormat RFC3339 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+ private static final String SITE_VOLUME_NAME = "gerrit-site";
+ public static final int HTTP_PORT = 8080;
+ public static final int SSH_PORT = 29418;
+ public static final int JGROUPS_PORT = 7800;
+ public static final int DEBUG_PORT = 8000;
+
+ public GerritStatefulSet() {
+ super(StatefulSet.class);
+ }
+
+ @Override
+ protected StatefulSet desired(Gerrit gerrit, Context<Gerrit> context) {
+ StatefulSetBuilder stsBuilder = new StatefulSetBuilder();
+
+ List<Container> initContainers = new ArrayList<>();
+
+ NfsWorkaroundConfig nfsWorkaround =
+ gerrit.getSpec().getStorage().getStorageClasses().getNfsWorkaround();
+ if (nfsWorkaround.isEnabled() && nfsWorkaround.isChownOnStartup()) {
+ boolean hasIdmapdConfig =
+ gerrit.getSpec().getStorage().getStorageClasses().getNfsWorkaround().getIdmapdConfig()
+ != null;
+ ContainerImageConfig images = gerrit.getSpec().getContainerImages();
+
+ if (gerrit.getSpec().isHighlyAvailablePrimary()) {
+
+ initContainers.add(
+ GerritCluster.createNfsInitContainer(
+ hasIdmapdConfig, images, List.of(GerritCluster.getHAShareVolumeMount())));
+ } else {
+ initContainers.add(GerritCluster.createNfsInitContainer(hasIdmapdConfig, images));
+ }
+ }
+
+ Map<String, String> replicaSetAnnotations = new HashMap<>();
+ if (gerrit.getStatus() != null && isGerritRestartRequired(gerrit, context)) {
+ replicaSetAnnotations.put(
+          "kubectl.kubernetes.io/restartedAt", java.time.format.DateTimeFormatter.ISO_INSTANT.format(Instant.now()));
+ } else {
+ Optional<StatefulSet> existingSts = context.getSecondaryResource(StatefulSet.class);
+ if (existingSts.isPresent()) {
+ Map<String, String> existingAnnotations =
+ existingSts.get().getSpec().getTemplate().getMetadata().getAnnotations();
+ if (existingAnnotations.containsKey("kubectl.kubernetes.io/restartedAt")) {
+ replicaSetAnnotations.put(
+ "kubectl.kubernetes.io/restartedAt",
+ existingAnnotations.get("kubectl.kubernetes.io/restartedAt"));
+ }
+ }
+ }
+
+ stsBuilder
+ .withApiVersion("apps/v1")
+ .withNewMetadata()
+ .withName(gerrit.getMetadata().getName())
+ .withNamespace(gerrit.getMetadata().getNamespace())
+ .withLabels(getLabels(gerrit))
+ .endMetadata()
+ .withNewSpec()
+ .withServiceName(GerritService.getName(gerrit))
+ .withReplicas(gerrit.getSpec().getReplicas())
+ .withNewUpdateStrategy()
+ .withNewRollingUpdate()
+ .withPartition(gerrit.getSpec().getUpdatePartition())
+ .endRollingUpdate()
+ .endUpdateStrategy()
+ .withNewSelector()
+ .withMatchLabels(getSelectorLabels(gerrit))
+ .endSelector()
+ .withNewTemplate()
+ .withNewMetadata()
+ .withAnnotations(replicaSetAnnotations)
+ .withLabels(getLabels(gerrit))
+ .endMetadata()
+ .withNewSpec()
+        .withServiceAccountName(gerrit.getSpec().getServiceAccount())
+ .withTolerations(gerrit.getSpec().getTolerations())
+ .withTopologySpreadConstraints(gerrit.getSpec().getTopologySpreadConstraints())
+ .withAffinity(gerrit.getSpec().getAffinity())
+ .withPriorityClassName(gerrit.getSpec().getPriorityClassName())
+ .withTerminationGracePeriodSeconds(gerrit.getSpec().getGracefulStopTimeout())
+ .addAllToImagePullSecrets(gerrit.getSpec().getContainerImages().getImagePullSecrets())
+ .withNewSecurityContext()
+ .withFsGroup(100L)
+ .endSecurityContext()
+ .addNewInitContainer()
+ .withName("gerrit-init")
+ .withEnv(getEnvVars(gerrit))
+ .withImagePullPolicy(gerrit.getSpec().getContainerImages().getImagePullPolicy())
+ .withImage(
+ gerrit.getSpec().getContainerImages().getGerritImages().getFullImageName("gerrit-init"))
+ .withResources(gerrit.getSpec().getResources())
+ .addAllToVolumeMounts(getVolumeMounts(gerrit, true))
+ .endInitContainer()
+ .addAllToInitContainers(initContainers)
+ .addNewContainer()
+ .withName("gerrit")
+ .withImagePullPolicy(gerrit.getSpec().getContainerImages().getImagePullPolicy())
+ .withImage(
+ gerrit.getSpec().getContainerImages().getGerritImages().getFullImageName("gerrit"))
+ .withNewLifecycle()
+ .withNewPreStop()
+ .withNewExec()
+ .withCommand(
+            "/bin/ash", "-c", "kill -2 $(pidof java) && tail --pid=$(pidof java) -f /dev/null")
+ .endExec()
+ .endPreStop()
+ .endLifecycle()
+ .withEnv(getEnvVars(gerrit))
+ .withPorts(getContainerPorts(gerrit))
+ .withResources(gerrit.getSpec().getResources())
+ .withStartupProbe(gerrit.getSpec().getStartupProbe())
+ .withReadinessProbe(gerrit.getSpec().getReadinessProbe())
+ .withLivenessProbe(gerrit.getSpec().getLivenessProbe())
+ .addAllToVolumeMounts(getVolumeMounts(gerrit, false))
+ .endContainer()
+ .addAllToVolumes(getVolumes(gerrit))
+ .endSpec()
+ .endTemplate()
+ .addNewVolumeClaimTemplate()
+ .withNewMetadata()
+ .withName(SITE_VOLUME_NAME)
+ .withLabels(getSelectorLabels(gerrit))
+ .endMetadata()
+ .withNewSpec()
+ .withAccessModes("ReadWriteOnce")
+ .withNewResources()
+ .withRequests(Map.of("storage", gerrit.getSpec().getSite().getSize()))
+ .endResources()
+ .withStorageClassName(gerrit.getSpec().getStorage().getStorageClasses().getReadWriteOnce())
+ .endSpec()
+ .endVolumeClaimTemplate()
+ .endSpec();
+
+ return stsBuilder.build();
+ }
+
+ private static String getComponentName(Gerrit gerrit) {
+ return String.format("gerrit-statefulset-%s", gerrit.getMetadata().getName());
+ }
+
+ public static Map<String, String> getSelectorLabels(Gerrit gerrit) {
+ return GerritCluster.getSelectorLabels(
+ gerrit.getMetadata().getName(), getComponentName(gerrit));
+ }
+
+ private static Map<String, String> getLabels(Gerrit gerrit) {
+ return GerritCluster.getLabels(
+ gerrit.getMetadata().getName(),
+ getComponentName(gerrit),
+ GerritReconciler.class.getSimpleName());
+ }
+
+ private Set<Volume> getVolumes(Gerrit gerrit) {
+ Set<Volume> volumes = new HashSet<>();
+
+ volumes.add(
+ GerritCluster.getSharedVolume(
+ gerrit.getSpec().getStorage().getSharedStorage().getExternalPVC()));
+
+ volumes.add(
+ new VolumeBuilder()
+ .withName("gerrit-init-config")
+ .withNewConfigMap()
+ .withName(GerritInitConfigMap.getName(gerrit))
+ .endConfigMap()
+ .build());
+
+ volumes.add(
+ new VolumeBuilder()
+ .withName("gerrit-config")
+ .withNewConfigMap()
+ .withName(GerritConfigMap.getName(gerrit))
+ .endConfigMap()
+ .build());
+
+ volumes.add(
+ new VolumeBuilder()
+ .withName(gerrit.getSpec().getSecretRef())
+ .withNewSecret()
+ .withSecretName(gerrit.getSpec().getSecretRef())
+ .endSecret()
+ .build());
+
+ NfsWorkaroundConfig nfsWorkaround =
+ gerrit.getSpec().getStorage().getStorageClasses().getNfsWorkaround();
+ if (nfsWorkaround.isEnabled() && nfsWorkaround.getIdmapdConfig() != null) {
+ volumes.add(GerritCluster.getNfsImapdConfigVolume());
+ }
+
+ return volumes;
+ }
+
+ private Set<VolumeMount> getVolumeMounts(Gerrit gerrit, boolean isInitContainer) {
+ Set<VolumeMount> volumeMounts = new HashSet<>();
+ volumeMounts.add(
+ new VolumeMountBuilder().withName(SITE_VOLUME_NAME).withMountPath("/var/gerrit").build());
+ if (gerrit.getSpec().isHighlyAvailablePrimary()) {
+ volumeMounts.add(GerritCluster.getHAShareVolumeMount());
+ }
+ volumeMounts.add(GerritCluster.getGitRepositoriesVolumeMount());
+ volumeMounts.add(GerritCluster.getLogsVolumeMount());
+ volumeMounts.add(
+ new VolumeMountBuilder()
+ .withName("gerrit-config")
+ .withMountPath("/var/mnt/etc/config")
+ .build());
+
+ volumeMounts.add(
+ new VolumeMountBuilder()
+ .withName(gerrit.getSpec().getSecretRef())
+ .withMountPath("/var/mnt/etc/secret")
+ .build());
+
+ if (isInitContainer) {
+ volumeMounts.add(
+ new VolumeMountBuilder()
+ .withName("gerrit-init-config")
+ .withMountPath("/var/config")
+ .build());
+
+ if (gerrit.getSpec().getStorage().getPluginCache().isEnabled()
+ && gerrit.getSpec().getPlugins().stream().anyMatch(p -> !p.isPackagedPlugin())) {
+ volumeMounts.add(GerritCluster.getPluginCacheVolumeMount());
+ }
+ }
+
+ NfsWorkaroundConfig nfsWorkaround =
+ gerrit.getSpec().getStorage().getStorageClasses().getNfsWorkaround();
+ if (nfsWorkaround.isEnabled() && nfsWorkaround.getIdmapdConfig() != null) {
+ volumeMounts.add(GerritCluster.getNfsImapdConfigVolumeMount());
+ }
+
+ return volumeMounts;
+ }
+
+ private List<ContainerPort> getContainerPorts(Gerrit gerrit) {
+ List<ContainerPort> containerPorts = new ArrayList<>();
+ containerPorts.add(new ContainerPort(HTTP_PORT, null, null, "http", null));
+
+ if (gerrit.isSshEnabled()) {
+ containerPorts.add(new ContainerPort(SSH_PORT, null, null, "ssh", null));
+ }
+
+ if (gerrit.getSpec().isHighlyAvailablePrimary()) {
+ containerPorts.add(new ContainerPort(JGROUPS_PORT, null, null, "jgroups", null));
+ }
+
+ if (gerrit.getSpec().getDebug().isEnabled()) {
+ containerPorts.add(new ContainerPort(DEBUG_PORT, null, null, "debug", null));
+ }
+
+ return containerPorts;
+ }
+
+ private List<EnvVar> getEnvVars(Gerrit gerrit) {
+ List<EnvVar> envVars = new ArrayList<>();
+ envVars.add(GerritCluster.getPodNameEnvVar());
+ if (gerrit.getSpec().isHighlyAvailablePrimary()) {
+ envVars.add(
+ new EnvVarBuilder()
+ .withName("GERRIT_URL")
+ .withValue(
+ String.format(
+ "http://$(POD_NAME).%s:%s", GerritService.getHostname(gerrit), HTTP_PORT))
+ .build());
+ }
+ return envVars;
+ }
+
+ private boolean isGerritRestartRequired(Gerrit gerrit, Context<Gerrit> context) {
+ if (wasConfigMapUpdated(GerritInitConfigMap.getName(gerrit), gerrit)
+ || wasConfigMapUpdated(GerritConfigMap.getName(gerrit), gerrit)) {
+ return true;
+ }
+
+ String secretName = gerrit.getSpec().getSecretRef();
+ Optional<String> gerritSecret =
+ context.managedDependentResourceContext().get(CONTEXT_SECRET_VERSION_KEY, String.class);
+ if (gerritSecret.isPresent()) {
+ String secVersion = gerritSecret.get();
+ if (!secVersion.equals(gerrit.getStatus().getAppliedSecretVersions().get(secretName))) {
+ logger.atFine().log(
+ "Looking up Secret: %s; Installed secret resource version: %s; Resource version known to Gerrit: %s",
+ secretName, secVersion, gerrit.getStatus().getAppliedSecretVersions().get(secretName));
+ return true;
+ }
+ }
+ return false;
+ }
+
+  private boolean wasConfigMapUpdated(String configMapName, Gerrit gerrit) {
+    io.fabric8.kubernetes.api.model.ConfigMap configMap =
+        client
+            .configMaps()
+            .inNamespace(gerrit.getMetadata().getNamespace())
+            .withName(configMapName)
+            .get();
+    // A ConfigMap that does not exist yet (e.g. on first reconciliation) must not cause an NPE.
+    String configMapVersion = configMap == null ? null : configMap.getMetadata().getResourceVersion();
+    String knownConfigMapVersion =
+        gerrit.getStatus().getAppliedConfigMapVersions().get(configMapName);
+    if (configMapVersion != null && !configMapVersion.equals(knownConfigMapVersion)) {
+      logger.atInfo().log(
+          "Looking up ConfigMap: %s; Installed configmap resource version: %s; Resource version known to Gerrit: %s",
+          configMapName, configMapVersion, knownConfigMapVersion);
+      return true;
+    }
+    return false;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionConflictException.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionConflictException.java
new file mode 100644
index 0000000..8d6bb26
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionConflictException.java
@@ -0,0 +1,30 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gitgc;
+
+import java.util.Collection;
+
+public class GitGarbageCollectionConflictException extends RuntimeException {
+
+ private static final long serialVersionUID = 1L;
+
+ public GitGarbageCollectionConflictException(Collection<String> projectsIntercept) {
+ super(String.format("Found conflicting GC jobs for projects: %s", projectsIntercept));
+ }
+
+ public GitGarbageCollectionConflictException(String s) {
+ super(s);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionReconciler.java
new file mode 100644
index 0000000..8d28fea
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionReconciler.java
@@ -0,0 +1,152 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gitgc;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.gitgc.dependent.GitGarbageCollectionCronJob;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollection;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollectionStatus;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollectionStatus.GitGcState;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusHandler;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusUpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.SecondaryToPrimaryMapper;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+@Singleton
+@ControllerConfiguration
+public class GitGarbageCollectionReconciler
+ implements Reconciler<GitGarbageCollection>,
+ EventSourceInitializer<GitGarbageCollection>,
+ ErrorStatusHandler<GitGarbageCollection> {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ private final KubernetesClient client;
+
+ private GitGarbageCollectionCronJob dependentCronJob;
+
+ @Inject
+ public GitGarbageCollectionReconciler(KubernetesClient client) {
+ this.client = client;
+ this.dependentCronJob = new GitGarbageCollectionCronJob();
+ this.dependentCronJob.setKubernetesClient(client);
+ }
+
+ @Override
+ public Map<String, EventSource> prepareEventSources(
+ EventSourceContext<GitGarbageCollection> context) {
+ final SecondaryToPrimaryMapper<GitGarbageCollection> specificProjectGitGcMapper =
+ (GitGarbageCollection gc) ->
+ context
+ .getPrimaryCache()
+ .list(gitGc -> gitGc.getSpec().getProjects().isEmpty())
+ .map(ResourceID::fromResource)
+ .collect(Collectors.toSet());
+
+ InformerEventSource<GitGarbageCollection, GitGarbageCollection> gitGcEventSource =
+ new InformerEventSource<>(
+ InformerConfiguration.from(GitGarbageCollection.class, context)
+ .withSecondaryToPrimaryMapper(specificProjectGitGcMapper)
+ .build(),
+ context);
+
+ final SecondaryToPrimaryMapper<GerritCluster> gerritClusterMapper =
+ (GerritCluster cluster) ->
+ context
+ .getPrimaryCache()
+ .list(gitGc -> gitGc.getSpec().getCluster().equals(cluster.getMetadata().getName()))
+ .map(ResourceID::fromResource)
+ .collect(Collectors.toSet());
+
+ InformerEventSource<GerritCluster, GitGarbageCollection> gerritClusterEventSource =
+ new InformerEventSource<>(
+ InformerConfiguration.from(GerritCluster.class, context)
+ .withSecondaryToPrimaryMapper(gerritClusterMapper)
+ .build(),
+ context);
+
+ return EventSourceInitializer.nameEventSources(
+ gitGcEventSource, gerritClusterEventSource, dependentCronJob.initEventSource(context));
+ }
+
+ @Override
+ public UpdateControl<GitGarbageCollection> reconcile(
+ GitGarbageCollection gitGc, Context<GitGarbageCollection> context) {
+ if (gitGc.getSpec().getProjects().isEmpty()) {
+ gitGc = excludeProjectsHandledSeparately(gitGc);
+ }
+
+ dependentCronJob.reconcile(gitGc, context);
+ return UpdateControl.updateStatus(updateGitGcStatus(gitGc));
+ }
+
+ private GitGarbageCollection updateGitGcStatus(GitGarbageCollection gitGc) {
+ GitGarbageCollectionStatus status = gitGc.getStatus();
+ if (status == null) {
+ status = new GitGarbageCollectionStatus();
+ }
+ status.setReplicateAll(gitGc.getSpec().getProjects().isEmpty());
+ status.setState(GitGcState.ACTIVE);
+ gitGc.setStatus(status);
+ return gitGc;
+ }
+
+ private GitGarbageCollection excludeProjectsHandledSeparately(GitGarbageCollection currentGitGc) {
+ List<GitGarbageCollection> gitGcs =
+ client
+ .resources(GitGarbageCollection.class)
+ .inNamespace(currentGitGc.getMetadata().getNamespace())
+ .list()
+ .getItems();
+    gitGcs.removeIf(gc -> gc.getMetadata().getUid().equals(currentGitGc.getMetadata().getUid()));
+ GitGarbageCollectionStatus currentGitGcStatus = currentGitGc.getStatus();
+ currentGitGcStatus.resetExcludedProjects();
+ for (GitGarbageCollection gc : gitGcs) {
+ currentGitGcStatus.excludeProjects(gc.getSpec().getProjects());
+ }
+ currentGitGc.setStatus(currentGitGcStatus);
+
+ return currentGitGc;
+ }
+
+ @Override
+ public ErrorStatusUpdateControl<GitGarbageCollection> updateErrorStatus(
+ GitGarbageCollection gitGc, Context<GitGarbageCollection> context, Exception e) {
+ GitGarbageCollectionStatus status = new GitGarbageCollectionStatus();
+ if (e instanceof GitGarbageCollectionConflictException) {
+ status.setState(GitGcState.CONFLICT);
+ } else {
+ logger.atSevere().withCause(e).log("Failed reconcile with message: %s", e.getMessage());
+ status.setState(GitGcState.ERROR);
+ }
+ gitGc.setStatus(status);
+
+ return ErrorStatusUpdateControl.updateStatus(gitGc);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/dependent/GitGarbageCollectionCronJob.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/dependent/GitGarbageCollectionCronJob.java
new file mode 100644
index 0000000..254bbab
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/gitgc/dependent/GitGarbageCollectionCronJob.java
@@ -0,0 +1,186 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gitgc.dependent;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollection;
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.ContainerBuilder;
+import io.fabric8.kubernetes.api.model.Volume;
+import io.fabric8.kubernetes.api.model.VolumeMount;
+import io.fabric8.kubernetes.api.model.batch.v1.CronJob;
+import io.fabric8.kubernetes.api.model.batch.v1.CronJobBuilder;
+import io.fabric8.kubernetes.api.model.batch.v1.JobTemplateSpec;
+import io.fabric8.kubernetes.api.model.batch.v1.JobTemplateSpecBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+public class GitGarbageCollectionCronJob
+ extends CRUDKubernetesDependentResource<CronJob, GitGarbageCollection> {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ public GitGarbageCollectionCronJob() {
+ super(CronJob.class);
+ }
+
+ @Override
+ protected CronJob desired(GitGarbageCollection gitGc, Context<GitGarbageCollection> context) {
+ String ns = gitGc.getMetadata().getNamespace();
+ String name = gitGc.getMetadata().getName();
+ GerritCluster gerritCluster =
+ client
+ .resources(GerritCluster.class)
+ .inNamespace(ns)
+ .withName(gitGc.getSpec().getCluster())
+ .get();
+ logger.atInfo().log("Reconciling GitGc with name: %s/%s", ns, name);
+
+ Map<String, String> gitGcLabels =
+ gerritCluster.getLabels("GitGc", this.getClass().getSimpleName());
+
+ List<Container> initContainers = new ArrayList<>();
+ if (gerritCluster.getSpec().getStorage().getStorageClasses().getNfsWorkaround().isEnabled()
+ && gerritCluster
+ .getSpec()
+ .getStorage()
+ .getStorageClasses()
+ .getNfsWorkaround()
+ .isChownOnStartup()) {
+ initContainers.add(gerritCluster.createNfsInitContainer());
+ }
+
+ JobTemplateSpec gitGcJobTemplate =
+ new JobTemplateSpecBuilder()
+ .withNewSpec()
+ .withNewTemplate()
+ .withNewMetadata()
+ .withAnnotations(
+ Map.of(
+ "sidecar.istio.io/inject",
+ "false",
+ "cluster-autoscaler.kubernetes.io/safe-to-evict",
+ "false"))
+ .withLabels(gitGcLabels)
+ .endMetadata()
+ .withNewSpec()
+ .withTolerations(gitGc.getSpec().getTolerations())
+ .withAffinity(gitGc.getSpec().getAffinity())
+ .addAllToImagePullSecrets(
+ gerritCluster.getSpec().getContainerImages().getImagePullSecrets())
+ .withRestartPolicy("OnFailure")
+ .withNewSecurityContext()
+ .withFsGroup(100L)
+ .endSecurityContext()
+ .addToContainers(buildGitGcContainer(gitGc, gerritCluster))
+ .withVolumes(getVolumes(gerritCluster))
+ .endSpec()
+ .endTemplate()
+ .endSpec()
+ .build();
+
+ return new CronJobBuilder()
+ .withApiVersion("batch/v1")
+ .withNewMetadata()
+ .withNamespace(ns)
+ .withName(name)
+ .withLabels(gitGcLabels)
+ .withAnnotations(
+ Collections.singletonMap("app.kubernetes.io/managed-by", "gerrit-operator"))
+ .addNewOwnerReference()
+ .withApiVersion(gitGc.getApiVersion())
+ .withKind(gitGc.getKind())
+ .withName(name)
+ .withUid(gitGc.getMetadata().getUid())
+ .endOwnerReference()
+ .endMetadata()
+ .withNewSpec()
+ .withSchedule(gitGc.getSpec().getSchedule())
+ .withConcurrencyPolicy("Forbid")
+ .withJobTemplate(gitGcJobTemplate)
+ .endSpec()
+ .build();
+ }
+
+  private Container buildGitGcContainer(GitGarbageCollection gitGc, GerritCluster gerritCluster) {
+    // Mutable list: List.of() is unmodifiable and would throw on the conditional add below.
+    List<VolumeMount> volumeMounts = new ArrayList<>();
+    volumeMounts.add(GerritCluster.getGitRepositoriesVolumeMount("/var/gerrit/git"));
+    volumeMounts.add(GerritCluster.getLogsVolumeMount("/var/log/git"));
+
+    if (gerritCluster.getSpec().getStorage().getStorageClasses().getNfsWorkaround().isEnabled()
+        && gerritCluster
+                .getSpec()
+                .getStorage()
+                .getStorageClasses()
+                .getNfsWorkaround()
+                .getIdmapdConfig()
+            != null) {
+      volumeMounts.add(GerritCluster.getNfsImapdConfigVolumeMount());
+    }
+
+    ContainerBuilder gitGcContainerBuilder =
+        new ContainerBuilder()
+            .withName("git-gc")
+            .withImagePullPolicy(gerritCluster.getSpec().getContainerImages().getImagePullPolicy())
+            .withImage(
+                gerritCluster
+                    .getSpec()
+                    .getContainerImages()
+                    .getGerritImages()
+                    .getFullImageName("git-gc"))
+            .withResources(gitGc.getSpec().getResources())
+            .withEnv(GerritCluster.getPodNameEnvVar())
+            .withVolumeMounts(volumeMounts);
+
+    ArrayList<String> args = new ArrayList<>();
+    for (String project : gitGc.getSpec().getProjects()) {
+      args.add("-p");
+      args.add(project);
+    }
+    for (String project : gitGc.getStatus().getExcludedProjects()) {
+      args.add("-s");
+      args.add(project);
+    }
+    gitGcContainerBuilder.addAllToArgs(args);
+
+    return gitGcContainerBuilder.build();
+  }
+
+ private List<Volume> getVolumes(GerritCluster gerritCluster) {
+ List<Volume> volumes = new ArrayList<>();
+
+ volumes.add(
+ GerritCluster.getSharedVolume(
+ gerritCluster.getSpec().getStorage().getSharedStorage().getExternalPVC()));
+
+ if (gerritCluster.getSpec().getStorage().getStorageClasses().getNfsWorkaround().isEnabled()) {
+ if (gerritCluster
+ .getSpec()
+ .getStorage()
+ .getStorageClasses()
+ .getNfsWorkaround()
+ .getIdmapdConfig()
+ != null) {
+ volumes.add(GerritCluster.getNfsImapdConfigVolume());
+ }
+ }
+ return volumes;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/Constants.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/Constants.java
new file mode 100644
index 0000000..1a7f3b2
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/Constants.java
@@ -0,0 +1,22 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network;
+
+public class Constants {
+  public static final String UPLOAD_PACK_URL_PATTERN = "/.*/git-upload-pack";
+  public static final String INFO_REFS_PATTERN = "/.*/info/refs";
+  public static final String RECEIVE_PACK_URL_PATTERN = "/.*/git-receive-pack";
+  public static final String PROJECTS_URL_PATTERN = "/a/projects/.*";
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/GerritClusterIngressCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/GerritClusterIngressCondition.java
new file mode 100644
index 0000000..9077f1b
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/GerritClusterIngressCondition.java
@@ -0,0 +1,33 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.kubernetes.api.model.networking.v1.Ingress;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+public class GerritClusterIngressCondition implements Condition<Ingress, GerritNetwork> {
+
+ @Override
+ public boolean isMet(
+ DependentResource<Ingress, GerritNetwork> dependentResource,
+ GerritNetwork gerritNetwork,
+ Context<GerritNetwork> context) {
+ return gerritNetwork.getSpec().getIngress().isEnabled()
+ && (gerritNetwork.hasReceiver() || gerritNetwork.hasGerrits());
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/GerritNetworkReconcilerProvider.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/GerritNetworkReconcilerProvider.java
new file mode 100644
index 0000000..a614e92
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/GerritNetworkReconcilerProvider.java
@@ -0,0 +1,48 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network;
+
+import com.google.gerrit.k8s.operator.network.ambassador.GerritAmbassadorReconciler;
+import com.google.gerrit.k8s.operator.network.ingress.GerritIngressReconciler;
+import com.google.gerrit.k8s.operator.network.istio.GerritIstioReconciler;
+import com.google.gerrit.k8s.operator.network.none.GerritNoIngressReconciler;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.name.Named;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+
+public class GerritNetworkReconcilerProvider implements Provider<Reconciler<GerritNetwork>> {
+ private final IngressType ingressType;
+
+ @Inject
+ public GerritNetworkReconcilerProvider(@Named("IngressType") IngressType ingressType) {
+ this.ingressType = ingressType;
+ }
+
+ @Override
+ public Reconciler<GerritNetwork> get() {
+ switch (ingressType) {
+ case INGRESS:
+ return new GerritIngressReconciler();
+ case ISTIO:
+ return new GerritIstioReconciler();
+ case AMBASSADOR:
+ return new GerritAmbassadorReconciler();
+ default:
+ return new GerritNoIngressReconciler();
+ }
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/IngressType.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/IngressType.java
new file mode 100644
index 0000000..9c5383c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/IngressType.java
@@ -0,0 +1,22 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network;
+
+public enum IngressType {
+ NONE,
+ INGRESS,
+ ISTIO,
+ AMBASSADOR
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/GerritAmbassadorReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/GerritAmbassadorReconciler.java
new file mode 100644
index 0000000..34dc9db
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/GerritAmbassadorReconciler.java
@@ -0,0 +1,169 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.GerritAmbassadorReconciler.MAPPING_EVENT_SOURCE;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterHost.GERRIT_HOST;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMapping.GERRIT_MAPPING;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingGETReplica.GERRIT_MAPPING_GET_REPLICA;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingPOSTReplica.GERRIT_MAPPING_POST_REPLICA;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingPrimary.GERRIT_MAPPING_PRIMARY;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingReceiver.GERRIT_MAPPING_RECEIVER;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingReceiverGET.GERRIT_MAPPING_RECEIVER_GET;
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterTLSContext.GERRIT_TLS_CONTEXT;
+
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.CreateHostCondition;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterHost;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMapping;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingGETReplica;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingPOSTReplica;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingPrimary;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingReceiver;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingReceiverGET;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterTLSContext;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.LoadBalanceCondition;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.ReceiverMappingCondition;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.SingleMappingCondition;
+import com.google.gerrit.k8s.operator.network.ambassador.dependent.TLSContextCondition;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.inject.Singleton;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Provides an Ambassador-based implementation for GerritNetworkReconciler.
+ *
+ * <p>Creates and manages Ambassador Custom Resources using the "managed dependent resources"
+ * approach in josdk. Since multiple dependent resources of the same type (`Mapping`) need to be
+ * created, "resource discriminators" are used for each of the different Mapping dependent
+ * resources.
+ *
+ * <p>Ambassador custom resource POJOs are generated via the `java-generator-maven-plugin` in the
+ * fabric8 project.
+ *
+ * <p>Mapping logic
+ *
+ * <p>The Mappings are created based on the composition of Gerrit instances in the GerritCluster.
+ *
+ * <p>There are three cases:
+ *
+ * <p>1. 0 Primary 1 Replica
+ *
+ * <p>Direct all traffic (read/write) to the Replica
+ *
+ * <p>2. 1 Primary 0 Replica
+ *
+ * <p>Direct all traffic (read/write) to the Primary
+ *
+ * <p>3. 1 Primary 1 Replica
+ *
+ * <p>Direct write traffic to Primary and read traffic to Replica. To capture this requirement,
+ * three different Mappings have to be created.
+ *
+ * <p>Note: git fetch/clone operations result in two HTTP requests to the git server. The first is
+ * of the form `GET /my-test-repo/info/refs?service=git-upload-pack` and the second is of the form
+ * `POST /my-test-repo/git-upload-pack`.
+ *
+ * <p>Note: git push operations result in two HTTP requests to the git server. The first is of the
+ * form `GET /my-test-repo/info/refs?service=git-receive-pack` and the second is of the form `POST
+ * /my-test-repo/git-receive-pack`.
+ *
+ * <p>If a Receiver is part of the GerritCluster, additional mappings are created such that all
+ * requests that the replication plugin sends to the `adminUrl` [1] are routed to the Receiver. This
+ * includes `git push` related `GET` and `POST` requests, and requests to the `/projects` REST API
+ * endpoints.
+ *
+ * <p>[1]
+ * https://gerrit.googlesource.com/plugins/replication/+/refs/heads/master/src/main/resources/Documentation/config.md
+ */
+@Singleton
+@ControllerConfiguration(
+ dependents = {
+ @Dependent(
+ name = GERRIT_MAPPING,
+ type = GerritClusterMapping.class,
+ // Cluster has only either Primary or Replica instance
+ reconcilePrecondition = SingleMappingCondition.class,
+ useEventSourceWithName = MAPPING_EVENT_SOURCE),
+ @Dependent(
+ name = GERRIT_MAPPING_POST_REPLICA,
+ type = GerritClusterMappingPOSTReplica.class,
+ // Cluster has both Primary and Replica instances
+ reconcilePrecondition = LoadBalanceCondition.class,
+ useEventSourceWithName = MAPPING_EVENT_SOURCE),
+ @Dependent(
+ name = GERRIT_MAPPING_GET_REPLICA,
+ type = GerritClusterMappingGETReplica.class,
+ reconcilePrecondition = LoadBalanceCondition.class,
+ useEventSourceWithName = MAPPING_EVENT_SOURCE),
+ @Dependent(
+ name = GERRIT_MAPPING_PRIMARY,
+ type = GerritClusterMappingPrimary.class,
+ reconcilePrecondition = LoadBalanceCondition.class,
+ useEventSourceWithName = MAPPING_EVENT_SOURCE),
+ @Dependent(
+ name = GERRIT_MAPPING_RECEIVER,
+ type = GerritClusterMappingReceiver.class,
+ reconcilePrecondition = ReceiverMappingCondition.class,
+ useEventSourceWithName = MAPPING_EVENT_SOURCE),
+ @Dependent(
+ name = GERRIT_MAPPING_RECEIVER_GET,
+ type = GerritClusterMappingReceiverGET.class,
+ reconcilePrecondition = ReceiverMappingCondition.class,
+ useEventSourceWithName = MAPPING_EVENT_SOURCE),
+ @Dependent(
+ name = GERRIT_TLS_CONTEXT,
+ type = GerritClusterTLSContext.class,
+ reconcilePrecondition = TLSContextCondition.class),
+ @Dependent(
+ name = GERRIT_HOST,
+ type = GerritClusterHost.class,
+ reconcilePrecondition = CreateHostCondition.class),
+ })
+public class GerritAmbassadorReconciler
+ implements Reconciler<GerritNetwork>, EventSourceInitializer<GerritNetwork> {
+
+ public static final String MAPPING_EVENT_SOURCE = "mapping-event-source";
+
+ // Because we have multiple dependent resources of the same type `Mapping`, we need to specify
+ // a named event source.
+ @Override
+ public Map<String, EventSource> prepareEventSources(EventSourceContext<GerritNetwork> context) {
+ InformerEventSource<Mapping, GerritNetwork> mappingEventSource =
+ new InformerEventSource<>(
+ InformerConfiguration.from(Mapping.class, context).build(), context);
+
+ Map<String, EventSource> eventSources = new HashMap<>();
+ eventSources.put(MAPPING_EVENT_SOURCE, mappingEventSource);
+ return eventSources;
+ }
+
+ @Override
+ public UpdateControl<GerritNetwork> reconcile(
+ GerritNetwork resource, Context<GerritNetwork> context) throws Exception {
+ return UpdateControl.noUpdate();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/AbstractAmbassadorDependentResource.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/AbstractAmbassadorDependentResource.java
new file mode 100644
index 0000000..3cc8d4a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/AbstractAmbassadorDependentResource.java
@@ -0,0 +1,61 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.getambassador.v2.MappingSpec;
+import io.getambassador.v2.MappingSpecBuilder;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import java.util.List;
+
+public abstract class AbstractAmbassadorDependentResource<T extends HasMetadata>
+ extends CRUDKubernetesDependentResource<T, GerritNetwork> {
+
+ public AbstractAmbassadorDependentResource(Class<T> dependentResourceClass) {
+ super(dependentResourceClass);
+ }
+
+ public ObjectMeta getCommonMetadata(GerritNetwork gerritnetwork, String name, String className) {
+ ObjectMeta metadata =
+ new ObjectMetaBuilder()
+ .withName(name)
+ .withNamespace(gerritnetwork.getMetadata().getNamespace())
+ .withLabels(
+ GerritCluster.getLabels(gerritnetwork.getMetadata().getName(), name, className))
+ .build();
+ return metadata;
+ }
+
+ public MappingSpec getCommonSpec(GerritNetwork gerritnetwork, String serviceName) {
+ MappingSpec spec =
+ new MappingSpecBuilder()
+ .withAmbassadorId(getAmbassadorIds(gerritnetwork))
+ .withHost(gerritnetwork.getSpec().getIngress().getHost())
+ .withPrefix("/")
+ .withService(serviceName)
+ .withBypassAuth(true)
+ .withRewrite("") // important - so the prefix doesn't get overwritten to "/"
+ .build();
+ return spec;
+ }
+
+ public List<String> getAmbassadorIds(GerritNetwork gerritnetwork) {
+ return gerritnetwork.getSpec().getIngress().getAmbassador().getId();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/CreateHostCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/CreateHostCondition.java
new file mode 100644
index 0000000..e7f99ec
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/CreateHostCondition.java
@@ -0,0 +1,34 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+public class CreateHostCondition implements Condition<Mapping, GerritNetwork> {
+
+ @Override
+ public boolean isMet(
+ DependentResource<Mapping, GerritNetwork> dependentResource,
+ GerritNetwork gerritNetwork,
+ Context<GerritNetwork> context) {
+
+ return gerritNetwork.getSpec().getIngress().isEnabled()
+ && gerritNetwork.getSpec().getIngress().getAmbassador().getCreateHost();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterHost.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterHost.java
new file mode 100644
index 0000000..5f916c4
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterHost.java
@@ -0,0 +1,62 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterTLSContext.GERRIT_TLS_CONTEXT;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Host;
+import io.getambassador.v2.HostBuilder;
+import io.getambassador.v2.hostspec.TlsContext;
+import io.getambassador.v2.hostspec.TlsSecret;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+
+public class GerritClusterHost extends AbstractAmbassadorDependentResource<Host> {
+
+ public static final String GERRIT_HOST = "gerrit-ambassador-host";
+
+ public GerritClusterHost() {
+ super(Host.class);
+ }
+
+ @Override
+ public Host desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+
+ TlsSecret tlsSecret = null;
+ TlsContext tlsContext = null;
+
+ if (gerritNetwork.getSpec().getIngress().getTls().isEnabled()) {
+ tlsSecret = new TlsSecret();
+ tlsContext = new TlsContext();
+ tlsSecret.setName(gerritNetwork.getSpec().getIngress().getTls().getSecret());
+ tlsContext.setName(GERRIT_TLS_CONTEXT);
+ }
+
+ Host host =
+ new HostBuilder()
+ .withNewMetadataLike(
+ getCommonMetadata(gerritNetwork, GERRIT_HOST, this.getClass().getSimpleName()))
+ .endMetadata()
+ .withNewSpec()
+ .withAmbassadorId(getAmbassadorIds(gerritNetwork))
+ .withHostname(gerritNetwork.getSpec().getIngress().getHost())
+ .withTlsSecret(tlsSecret)
+ .withTlsContext(tlsContext)
+ .endSpec()
+ .build();
+
+ return host;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMapping.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMapping.java
new file mode 100644
index 0000000..733be73
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMapping.java
@@ -0,0 +1,53 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.NetworkMemberWithSsh;
+import io.getambassador.v2.Mapping;
+import io.getambassador.v2.MappingBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+
+@KubernetesDependent(resourceDiscriminator = GerritClusterMappingDiscriminator.class)
+public class GerritClusterMapping extends AbstractAmbassadorDependentResource<Mapping>
+ implements MappingDependentResourceInterface {
+
+ public static final String GERRIT_MAPPING = "gerrit-mapping";
+
+ public GerritClusterMapping() {
+ super(Mapping.class);
+ }
+
+ @Override
+ public Mapping desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+
+ // If only one Gerrit instance in GerritCluster, send all git-over-https requests to it
+ NetworkMemberWithSsh gerrit =
+ gerritNetwork.hasGerritReplica()
+ ? gerritNetwork.getSpec().getGerritReplica()
+ : gerritNetwork.getSpec().getPrimaryGerrit();
+ String serviceName = gerrit.getName() + ":" + gerrit.getHttpPort();
+ Mapping mapping =
+ new MappingBuilder()
+ .withNewMetadataLike(
+ getCommonMetadata(gerritNetwork, GERRIT_MAPPING, this.getClass().getSimpleName()))
+ .endMetadata()
+ .withNewSpecLike(getCommonSpec(gerritNetwork, serviceName))
+ .endSpec()
+ .build();
+ return mapping;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingDiscriminator.java
new file mode 100644
index 0000000..12d99c3
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingDiscriminator.java
@@ -0,0 +1,37 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMapping.GERRIT_MAPPING;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+public class GerritClusterMappingDiscriminator
+    implements ResourceDiscriminator<Mapping, GerritNetwork> {
+
+  /** Looks up the cached "gerrit-mapping" Mapping in the GerritNetwork's namespace. */
+  @Override
+  public Optional<Mapping> distinguish(
+      Class<Mapping> resource, GerritNetwork network, Context<GerritNetwork> context) {
+    ResourceID id = new ResourceID(GERRIT_MAPPING, network.getMetadata().getNamespace());
+    InformerEventSource<Mapping, GerritNetwork> eventSource =
+        (InformerEventSource<Mapping, GerritNetwork>)
+            context.eventSourceRetriever().getResourceEventSourceFor(Mapping.class);
+    return eventSource.get(id);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingGETReplica.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingGETReplica.java
new file mode 100644
index 0000000..8fda99e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingGETReplica.java
@@ -0,0 +1,68 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.Constants.INFO_REFS_PATTERN;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.getambassador.v2.MappingBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.HashMap;
+
+@KubernetesDependent(resourceDiscriminator = GerritClusterMappingGETReplicaDiscriminator.class)
+public class GerritClusterMappingGETReplica extends AbstractAmbassadorDependentResource<Mapping>
+    implements MappingDependentResourceInterface {
+
+  public static final String GERRIT_MAPPING_GET_REPLICA = "gerrit-mapping-get-replica";
+
+  public GerritClusterMappingGETReplica() {
+    super(Mapping.class);
+  }
+
+  /**
+   * Builds the Mapping that routes git fetch/clone GET requests (the git-upload-pack ref
+   * advertisement) to the Gerrit replica service.
+   *
+   * @param gerritNetwork the GerritNetwork being reconciled
+   * @param context reconciliation context
+   * @return the desired Mapping resource
+   */
+  @Override
+  public Mapping desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+
+    String replicaServiceName =
+        gerritNetwork.getSpec().getGerritReplica().getName()
+            + ":"
+            + gerritNetwork.getSpec().getGerritReplica().getHttpPort();
+
+    // Plain HashMap instead of double-brace initialization: the double-brace idiom creates
+    // an anonymous inner class that retains a reference to this dependent-resource instance.
+    HashMap<String, Object> queryParameters = new HashMap<>();
+    queryParameters.put("service", "git-upload-pack");
+
+    // Send fetch/clone GET requests to the Replica
+    return new MappingBuilder()
+        .withNewMetadataLike(
+            getCommonMetadata(
+                gerritNetwork, GERRIT_MAPPING_GET_REPLICA, this.getClass().getSimpleName()))
+        .endMetadata()
+        .withNewSpecLike(getCommonSpec(gerritNetwork, replicaServiceName))
+        .withNewV2QueryParameters()
+        .withAdditionalProperties(queryParameters)
+        .endV2QueryParameters()
+        .withMethod("GET")
+        .withPrefix(INFO_REFS_PATTERN)
+        .withPrefixRegex(true)
+        .endSpec()
+        .build();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingGETReplicaDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingGETReplicaDiscriminator.java
new file mode 100644
index 0000000..9a4da43
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingGETReplicaDiscriminator.java
@@ -0,0 +1,38 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingGETReplica.GERRIT_MAPPING_GET_REPLICA;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+public class GerritClusterMappingGETReplicaDiscriminator
+    implements ResourceDiscriminator<Mapping, GerritNetwork> {
+
+  /** Looks up the cached "gerrit-mapping-get-replica" Mapping in the network's namespace. */
+  @Override
+  public Optional<Mapping> distinguish(
+      Class<Mapping> resource, GerritNetwork network, Context<GerritNetwork> context) {
+    ResourceID id =
+        new ResourceID(GERRIT_MAPPING_GET_REPLICA, network.getMetadata().getNamespace());
+    InformerEventSource<Mapping, GerritNetwork> eventSource =
+        (InformerEventSource<Mapping, GerritNetwork>)
+            context.eventSourceRetriever().getResourceEventSourceFor(Mapping.class);
+    return eventSource.get(id);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPOSTReplica.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPOSTReplica.java
new file mode 100644
index 0000000..1779990
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPOSTReplica.java
@@ -0,0 +1,58 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.Constants.UPLOAD_PACK_URL_PATTERN;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.getambassador.v2.MappingBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+
+@KubernetesDependent(resourceDiscriminator = GerritClusterMappingPOSTReplicaDiscriminator.class)
+public class GerritClusterMappingPOSTReplica extends AbstractAmbassadorDependentResource<Mapping>
+    implements MappingDependentResourceInterface {
+
+  public static final String GERRIT_MAPPING_POST_REPLICA = "gerrit-mapping-post-replica";
+
+  public GerritClusterMappingPOSTReplica() {
+    super(Mapping.class);
+  }
+
+  /**
+   * Builds the Mapping that routes git fetch/clone POST requests (git-upload-pack) to the
+   * Gerrit replica service.
+   *
+   * @param gerritNetwork the GerritNetwork being reconciled
+   * @param context reconciliation context
+   * @return the desired Mapping resource
+   */
+  @Override
+  public Mapping desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+    String replicaService =
+        String.format(
+            "%s:%s",
+            gerritNetwork.getSpec().getGerritReplica().getName(),
+            gerritNetwork.getSpec().getGerritReplica().getHttpPort());
+
+    // Send fetch/clone POST requests to the Replica
+    return new MappingBuilder()
+        .withNewMetadataLike(
+            getCommonMetadata(
+                gerritNetwork, GERRIT_MAPPING_POST_REPLICA, this.getClass().getSimpleName()))
+        .endMetadata()
+        .withNewSpecLike(getCommonSpec(gerritNetwork, replicaService))
+        .withMethod("POST")
+        .withPrefix(UPLOAD_PACK_URL_PATTERN)
+        .withPrefixRegex(true)
+        .endSpec()
+        .build();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPOSTReplicaDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPOSTReplicaDiscriminator.java
new file mode 100644
index 0000000..a1c02c8
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPOSTReplicaDiscriminator.java
@@ -0,0 +1,38 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingPOSTReplica.GERRIT_MAPPING_POST_REPLICA;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+public class GerritClusterMappingPOSTReplicaDiscriminator
+    implements ResourceDiscriminator<Mapping, GerritNetwork> {
+
+  /** Looks up the cached "gerrit-mapping-post-replica" Mapping in the network's namespace. */
+  @Override
+  public Optional<Mapping> distinguish(
+      Class<Mapping> resource, GerritNetwork network, Context<GerritNetwork> context) {
+    ResourceID id =
+        new ResourceID(GERRIT_MAPPING_POST_REPLICA, network.getMetadata().getNamespace());
+    InformerEventSource<Mapping, GerritNetwork> eventSource =
+        (InformerEventSource<Mapping, GerritNetwork>)
+            context.eventSourceRetriever().getResourceEventSourceFor(Mapping.class);
+    return eventSource.get(id);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPrimary.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPrimary.java
new file mode 100644
index 0000000..f62b74a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPrimary.java
@@ -0,0 +1,55 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.getambassador.v2.MappingBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+
+@KubernetesDependent(resourceDiscriminator = GerritClusterMappingPrimaryDiscriminator.class)
+public class GerritClusterMappingPrimary extends AbstractAmbassadorDependentResource<Mapping>
+    implements MappingDependentResourceInterface {
+
+  public static final String GERRIT_MAPPING_PRIMARY = "gerrit-mapping-primary";
+
+  public GerritClusterMappingPrimary() {
+    super(Mapping.class);
+  }
+
+  /**
+   * Builds the catch-all Mapping for the primary Gerrit. All write traffic (anything not
+   * matched by the more constrained fetch/clone Mappings, which Emissary evaluates first)
+   * lands here.
+   *
+   * @param gerritNetwork the GerritNetwork being reconciled
+   * @param context reconciliation context
+   * @return the desired Mapping resource
+   */
+  @Override
+  public Mapping desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+    String primaryService =
+        String.format(
+            "%s:%s",
+            gerritNetwork.getSpec().getPrimaryGerrit().getName(),
+            gerritNetwork.getSpec().getPrimaryGerrit().getHttpPort());
+
+    return new MappingBuilder()
+        .withNewMetadataLike(
+            getCommonMetadata(
+                gerritNetwork, GERRIT_MAPPING_PRIMARY, this.getClass().getSimpleName()))
+        .endMetadata()
+        .withNewSpecLike(getCommonSpec(gerritNetwork, primaryService))
+        .endSpec()
+        .build();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPrimaryDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPrimaryDiscriminator.java
new file mode 100644
index 0000000..0d38c69
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingPrimaryDiscriminator.java
@@ -0,0 +1,37 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingPrimary.GERRIT_MAPPING_PRIMARY;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+public class GerritClusterMappingPrimaryDiscriminator
+    implements ResourceDiscriminator<Mapping, GerritNetwork> {
+
+  /** Looks up the cached "gerrit-mapping-primary" Mapping in the network's namespace. */
+  @Override
+  public Optional<Mapping> distinguish(
+      Class<Mapping> resource, GerritNetwork network, Context<GerritNetwork> context) {
+    ResourceID id = new ResourceID(GERRIT_MAPPING_PRIMARY, network.getMetadata().getNamespace());
+    InformerEventSource<Mapping, GerritNetwork> eventSource =
+        (InformerEventSource<Mapping, GerritNetwork>)
+            context.eventSourceRetriever().getResourceEventSourceFor(Mapping.class);
+    return eventSource.get(id);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiver.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiver.java
new file mode 100644
index 0000000..2d50b65
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiver.java
@@ -0,0 +1,58 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.Constants.PROJECTS_URL_PATTERN;
+import static com.google.gerrit.k8s.operator.network.Constants.RECEIVE_PACK_URL_PATTERN;
+
+import com.google.gerrit.k8s.operator.receiver.dependent.ReceiverService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.getambassador.v2.MappingBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+
+@KubernetesDependent(resourceDiscriminator = GerritClusterMappingReceiverDiscriminator.class)
+// Routes project-admin and git-receive-pack (push) requests to the Receiver service.
+public class GerritClusterMappingReceiver extends AbstractAmbassadorDependentResource<Mapping>
+    implements MappingDependentResourceInterface {
+
+  public static final String GERRIT_MAPPING_RECEIVER = "gerrit-mapping-receiver";
+
+  public GerritClusterMappingReceiver() {
+    super(Mapping.class);
+  }
+
+  /**
+   * Builds the Mapping that sends requests matching the projects URL or the receive-pack URL
+   * to the Receiver service.
+   *
+   * @param gerritNetwork the GerritNetwork being reconciled
+   * @param context reconciliation context
+   * @return the desired Mapping resource
+   */
+  @Override
+  public Mapping desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+
+    // Backend target in "<service>:<port>" form, derived from the Receiver spec.
+    String receiverServiceName =
+        ReceiverService.getName(gerritNetwork.getSpec().getReceiver().getName())
+            + ":"
+            + gerritNetwork.getSpec().getReceiver().getHttpPort();
+
+    // NOTE(review): the prefix is the raw alternation A + "|" + B. Regex alternation has the
+    // lowest precedence, so this is safe only if neither pattern constant contains further
+    // top-level alternation/anchoring quirks — confirm against Constants and consider
+    // non-capturing groups "(?:A)|(?:B)" if they do.
+    Mapping mapping =
+        new MappingBuilder()
+            .withNewMetadataLike(
+                getCommonMetadata(
+                    gerritNetwork, GERRIT_MAPPING_RECEIVER, this.getClass().getSimpleName()))
+            .endMetadata()
+            .withNewSpecLike(getCommonSpec(gerritNetwork, receiverServiceName))
+            .withPrefix(PROJECTS_URL_PATTERN + "|" + RECEIVE_PACK_URL_PATTERN)
+            .withPrefixRegex(true)
+            .endSpec()
+            .build();
+    return mapping;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverDiscriminator.java
new file mode 100644
index 0000000..a31e905
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverDiscriminator.java
@@ -0,0 +1,37 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingReceiver.GERRIT_MAPPING_RECEIVER;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+public class GerritClusterMappingReceiverDiscriminator
+    implements ResourceDiscriminator<Mapping, GerritNetwork> {
+
+  /** Looks up the cached "gerrit-mapping-receiver" Mapping in the network's namespace. */
+  @Override
+  public Optional<Mapping> distinguish(
+      Class<Mapping> resource, GerritNetwork network, Context<GerritNetwork> context) {
+    ResourceID id = new ResourceID(GERRIT_MAPPING_RECEIVER, network.getMetadata().getNamespace());
+    InformerEventSource<Mapping, GerritNetwork> eventSource =
+        (InformerEventSource<Mapping, GerritNetwork>)
+            context.eventSourceRetriever().getResourceEventSourceFor(Mapping.class);
+    return eventSource.get(id);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverGET.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverGET.java
new file mode 100644
index 0000000..aadd9dc
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverGET.java
@@ -0,0 +1,67 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.Constants.INFO_REFS_PATTERN;
+
+import com.google.gerrit.k8s.operator.receiver.dependent.ReceiverService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.getambassador.v2.MappingBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.HashMap;
+
+@KubernetesDependent(resourceDiscriminator = GerritClusterMappingReceiverGETDiscriminator.class)
+public class GerritClusterMappingReceiverGET extends AbstractAmbassadorDependentResource<Mapping>
+    implements MappingDependentResourceInterface {
+
+  public static final String GERRIT_MAPPING_RECEIVER_GET = "gerrit-mapping-receiver-get";
+
+  public GerritClusterMappingReceiverGET() {
+    super(Mapping.class);
+  }
+
+  /**
+   * Builds the Mapping that routes git-receive-pack ref-advertisement GET requests
+   * (push negotiation) to the Receiver service.
+   *
+   * @param gerritNetwork the GerritNetwork being reconciled
+   * @param context reconciliation context
+   * @return the desired Mapping resource
+   */
+  @Override
+  public Mapping desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+
+    String receiverServiceName =
+        ReceiverService.getName(gerritNetwork.getSpec().getReceiver().getName())
+            + ":"
+            + gerritNetwork.getSpec().getReceiver().getHttpPort();
+
+    // Plain HashMap instead of double-brace initialization: the double-brace idiom creates
+    // an anonymous inner class that retains a reference to this dependent-resource instance.
+    HashMap<String, Object> queryParameters = new HashMap<>();
+    queryParameters.put("service", "git-receive-pack");
+
+    return new MappingBuilder()
+        .withNewMetadataLike(
+            getCommonMetadata(
+                gerritNetwork, GERRIT_MAPPING_RECEIVER_GET, this.getClass().getSimpleName()))
+        .endMetadata()
+        .withNewSpecLike(getCommonSpec(gerritNetwork, receiverServiceName))
+        .withNewV2QueryParameters()
+        .withAdditionalProperties(queryParameters)
+        .endV2QueryParameters()
+        .withMethod("GET")
+        .withPrefix(INFO_REFS_PATTERN)
+        .withPrefixRegex(true)
+        .endSpec()
+        .build();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverGETDiscriminator.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverGETDiscriminator.java
new file mode 100644
index 0000000..24cf7f8
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterMappingReceiverGETDiscriminator.java
@@ -0,0 +1,38 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.gerrit.k8s.operator.network.ambassador.dependent.GerritClusterMappingReceiverGET.GERRIT_MAPPING_RECEIVER_GET;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ResourceDiscriminator;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.Optional;
+
+public class GerritClusterMappingReceiverGETDiscriminator
+    implements ResourceDiscriminator<Mapping, GerritNetwork> {
+
+  /** Looks up the cached "gerrit-mapping-receiver-get" Mapping in the network's namespace. */
+  @Override
+  public Optional<Mapping> distinguish(
+      Class<Mapping> resource, GerritNetwork network, Context<GerritNetwork> context) {
+    ResourceID id =
+        new ResourceID(GERRIT_MAPPING_RECEIVER_GET, network.getMetadata().getNamespace());
+    InformerEventSource<Mapping, GerritNetwork> eventSource =
+        (InformerEventSource<Mapping, GerritNetwork>)
+            context.eventSourceRetriever().getResourceEventSourceFor(Mapping.class);
+    return eventSource.get(id);
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterTLSContext.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterTLSContext.java
new file mode 100644
index 0000000..7cae8da
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterTLSContext.java
@@ -0,0 +1,48 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.TLSContext;
+import io.getambassador.v2.TLSContextBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import java.util.List;
+
+public class GerritClusterTLSContext extends AbstractAmbassadorDependentResource<TLSContext> {
+
+  public static final String GERRIT_TLS_CONTEXT = "gerrit-tls-context";
+
+  public GerritClusterTLSContext() {
+    super(TLSContext.class);
+  }
+
+  /**
+   * Builds the TLSContext for the ingress host, referencing the TLS secret configured on the
+   * GerritNetwork's ingress spec.
+   *
+   * @param gerritNetwork the GerritNetwork being reconciled
+   * @param context reconciliation context
+   * @return the desired TLSContext resource
+   */
+  @Override
+  protected TLSContext desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+    return new TLSContextBuilder()
+        .withNewMetadataLike(
+            getCommonMetadata(gerritNetwork, GERRIT_TLS_CONTEXT, this.getClass().getSimpleName()))
+        .endMetadata()
+        .withNewSpec()
+        .withAmbassadorId(getAmbassadorIds(gerritNetwork))
+        .withHosts(List.of(gerritNetwork.getSpec().getIngress().getHost()))
+        .withSecret(gerritNetwork.getSpec().getIngress().getTls().getSecret())
+        .withSecretNamespacing(true)
+        .endSpec()
+        .build();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/LoadBalanceCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/LoadBalanceCondition.java
new file mode 100644
index 0000000..ea9066a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/LoadBalanceCondition.java
@@ -0,0 +1,35 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+public class LoadBalanceCondition implements Condition<Mapping, GerritNetwork> {
+
+  /**
+   * Met only when the ingress is enabled and the GerritNetwork has both a primary Gerrit and
+   * a Gerrit replica, i.e. when traffic actually needs to be split between two backends.
+   */
+  @Override
+  public boolean isMet(
+      DependentResource<Mapping, GerritNetwork> dependentResource,
+      GerritNetwork gerritNetwork,
+      Context<GerritNetwork> context) {
+    // Guard clause; same short-circuit evaluation order as a single && chain.
+    if (!gerritNetwork.getSpec().getIngress().isEnabled()) {
+      return false;
+    }
+    return gerritNetwork.hasPrimaryGerrit() && gerritNetwork.hasGerritReplica();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/MappingDependentResourceInterface.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/MappingDependentResourceInterface.java
new file mode 100644
index 0000000..dc27428
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/MappingDependentResourceInterface.java
@@ -0,0 +1,23 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+
+/**
+ * Common contract for dependent resources that compute a desired Ambassador {@code Mapping} for a
+ * {@code GerritNetwork}.
+ */
+public interface MappingDependentResourceInterface {
+  /**
+   * Builds the desired {@code Mapping} for the given network.
+   *
+   * @param gerritNetwork the primary GerritNetwork resource being reconciled
+   * @param context the reconciliation context
+   * @return the desired Mapping
+   */
+  // Note: interface members are implicitly public; the redundant modifier was dropped.
+  Mapping desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context);
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/ReceiverMappingCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/ReceiverMappingCondition.java
new file mode 100644
index 0000000..a83f984
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/ReceiverMappingCondition.java
@@ -0,0 +1,33 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+/**
+ * Workflow condition that is met when the GerritNetwork's ingress is enabled and the network
+ * includes a Receiver component.
+ */
+public class ReceiverMappingCondition implements Condition<Mapping, GerritNetwork> {
+
+  @Override
+  public boolean isMet(
+      DependentResource<Mapping, GerritNetwork> dependentResource,
+      GerritNetwork gerritNetwork,
+      Context<GerritNetwork> context) {
+
+    return gerritNetwork.getSpec().getIngress().isEnabled() && gerritNetwork.hasReceiver();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/SingleMappingCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/SingleMappingCondition.java
new file mode 100644
index 0000000..52a1f2c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/SingleMappingCondition.java
@@ -0,0 +1,34 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+/**
+ * Workflow condition that is met when the GerritNetwork has EXACTLY ONE Gerrit flavor (either a
+ * primary Gerrit or a Gerrit replica, not both) — the complement of {@code LoadBalanceCondition}.
+ */
+public class SingleMappingCondition implements Condition<Mapping, GerritNetwork> {
+
+  @Override
+  public boolean isMet(
+      DependentResource<Mapping, GerritNetwork> dependentResource,
+      GerritNetwork gerritNetwork,
+      Context<GerritNetwork> context) {
+
+    // XOR: true when exactly one of primary / replica exists.
+    return gerritNetwork.getSpec().getIngress().isEnabled()
+        && (gerritNetwork.hasPrimaryGerrit() ^ gerritNetwork.hasGerritReplica());
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/TLSContextCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/TLSContextCondition.java
new file mode 100644
index 0000000..053c559
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/TLSContextCondition.java
@@ -0,0 +1,34 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Mapping;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+/**
+ * Workflow condition that is met when both the ingress and TLS are enabled in the GerritNetwork
+ * spec, i.e. when an Ambassador TLSContext is required.
+ */
+public class TLSContextCondition implements Condition<Mapping, GerritNetwork> {
+
+  @Override
+  public boolean isMet(
+      DependentResource<Mapping, GerritNetwork> dependentResource,
+      GerritNetwork gerritNetwork,
+      Context<GerritNetwork> context) {
+
+    return gerritNetwork.getSpec().getIngress().isEnabled()
+        && gerritNetwork.getSpec().getIngress().getTls().isEnabled();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ingress/GerritIngressReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ingress/GerritIngressReconciler.java
new file mode 100644
index 0000000..30c90a9
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ingress/GerritIngressReconciler.java
@@ -0,0 +1,42 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ingress;
+
+import com.google.gerrit.k8s.operator.network.GerritClusterIngressCondition;
+import com.google.gerrit.k8s.operator.network.ingress.dependent.GerritClusterIngress;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.inject.Singleton;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+
+/**
+ * Reconciler for GerritNetwork resources using a plain Kubernetes Ingress. All work is delegated
+ * to the {@code GerritClusterIngress} dependent (guarded by {@code GerritClusterIngressCondition});
+ * the primary resource itself is never patched.
+ */
+@Singleton
+@ControllerConfiguration(
+    dependents = {
+      @Dependent(
+          name = "gerrit-ingress",
+          type = GerritClusterIngress.class,
+          reconcilePrecondition = GerritClusterIngressCondition.class)
+    })
+public class GerritIngressReconciler implements Reconciler<GerritNetwork> {
+
+  @Override
+  public UpdateControl<GerritNetwork> reconcile(
+      GerritNetwork resource, Context<GerritNetwork> context) throws Exception {
+    // Everything is managed via dependents; no status or spec update is needed here.
+    return UpdateControl.noUpdate();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ingress/dependent/GerritClusterIngress.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ingress/dependent/GerritClusterIngress.java
new file mode 100644
index 0000000..a62e298
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/ingress/dependent/GerritClusterIngress.java
@@ -0,0 +1,247 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ingress.dependent;
+
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork.SESSION_COOKIE_NAME;
+
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritService;
+import com.google.gerrit.k8s.operator.receiver.dependent.ReceiverService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.kubernetes.api.model.networking.v1.HTTPIngressPath;
+import io.fabric8.kubernetes.api.model.networking.v1.HTTPIngressPathBuilder;
+import io.fabric8.kubernetes.api.model.networking.v1.Ingress;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressBuilder;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressRule;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressRuleBuilder;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressSpecBuilder;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressTLS;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressTLSBuilder;
+import io.fabric8.kubernetes.api.model.networking.v1.ServiceBackendPort;
+import io.fabric8.kubernetes.api.model.networking.v1.ServiceBackendPortBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@KubernetesDependent
+public class GerritClusterIngress extends CRUDKubernetesDependentResource<Ingress, GerritNetwork> {
+ private static final String UPLOAD_PACK_URL_PATTERN = "/.*/git-upload-pack";
+ private static final String RECEIVE_PACK_URL_PATTERN = "/.*/git-receive-pack";
+ public static final String INGRESS_NAME = "gerrit-ingress";
+
+ public GerritClusterIngress() {
+ super(Ingress.class);
+ }
+
+ @Override
+ protected Ingress desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+ IngressSpecBuilder ingressSpecBuilder =
+ new IngressSpecBuilder().withRules(getIngressRule(gerritNetwork));
+ if (gerritNetwork.getSpec().getIngress().getTls().isEnabled()) {
+ ingressSpecBuilder.withTls(getIngressTLS(gerritNetwork));
+ }
+
+ Ingress gerritIngress =
+ new IngressBuilder()
+ .withNewMetadata()
+ .withName("gerrit-ingress")
+ .withNamespace(gerritNetwork.getMetadata().getNamespace())
+ .withLabels(
+ GerritCluster.getLabels(
+ gerritNetwork.getMetadata().getName(),
+ "gerrit-ingress",
+ this.getClass().getSimpleName()))
+ .withAnnotations(getAnnotations(gerritNetwork))
+ .endMetadata()
+ .withSpec(ingressSpecBuilder.build())
+ .build();
+
+ return gerritIngress;
+ }
+
+ private Map<String, String> getAnnotations(GerritNetwork gerritNetwork) {
+ Map<String, String> annotations = gerritNetwork.getSpec().getIngress().getAnnotations();
+ if (annotations == null) {
+ annotations = new HashMap<>();
+ }
+ annotations.put("nginx.ingress.kubernetes.io/use-regex", "true");
+ annotations.put("kubernetes.io/ingress.class", "nginx");
+
+ String configSnippet = "";
+ if (gerritNetwork.hasPrimaryGerrit() && gerritNetwork.hasGerritReplica()) {
+ String svcName = GerritService.getName(gerritNetwork.getSpec().getGerritReplica().getName());
+ configSnippet =
+ createNginxConfigSnippet(
+ "service=git-upload-pack", gerritNetwork.getMetadata().getNamespace(), svcName);
+ }
+ if (gerritNetwork.hasReceiver()) {
+ String svcName = ReceiverService.getName(gerritNetwork.getSpec().getReceiver().getName());
+ configSnippet =
+ createNginxConfigSnippet(
+ "service=git-receive-pack", gerritNetwork.getMetadata().getNamespace(), svcName);
+ }
+ if (!configSnippet.isBlank()) {
+ annotations.put("nginx.ingress.kubernetes.io/configuration-snippet", configSnippet);
+ }
+
+ annotations.put("nginx.ingress.kubernetes.io/affinity", "cookie");
+ annotations.put("nginx.ingress.kubernetes.io/session-cookie-name", SESSION_COOKIE_NAME);
+ annotations.put("nginx.ingress.kubernetes.io/session-cookie-path", "/");
+ annotations.put("nginx.ingress.kubernetes.io/session-cookie-max-age", "60");
+ annotations.put("nginx.ingress.kubernetes.io/session-cookie-expires", "60");
+
+ return annotations;
+ }
+
+ /**
+ * Creates a config snippet for the Nginx Ingress Controller [1]. This snippet will configure
+ * Nginx to route the request based on the `service` query parameter.
+ *
+ * <p>If it is set to `git-upload-pack` it will route the request to the provided service.
+ *
+ * <p>[1]https://docs.nginx.com/nginx-ingress-controller/configuration/ingress-resources/advanced-configuration-with-snippets/
+ *
+ * @param namespace Namespace of the destination service.
+ * @param svcName Name of the destination service.
+ * @return configuration snippet
+ */
+ private String createNginxConfigSnippet(String queryParam, String namespace, String svcName) {
+ StringBuilder configSnippet = new StringBuilder();
+ configSnippet.append("if ($args ~ ");
+ configSnippet.append(queryParam);
+ configSnippet.append("){");
+ configSnippet.append("\n");
+ configSnippet.append(" set $proxy_upstream_name \"");
+ configSnippet.append(namespace);
+ configSnippet.append("-");
+ configSnippet.append(svcName);
+ configSnippet.append("-");
+ configSnippet.append(GerritService.HTTP_PORT_NAME);
+ configSnippet.append("\";\n");
+ configSnippet.append(" set $proxy_host $proxy_upstream_name;");
+ configSnippet.append("\n");
+ configSnippet.append(" set $service_name \"");
+ configSnippet.append(svcName);
+ configSnippet.append("\";\n}");
+ return configSnippet.toString();
+ }
+
+ private IngressTLS getIngressTLS(GerritNetwork gerritNetwork) {
+ if (gerritNetwork.getSpec().getIngress().getTls().isEnabled()) {
+ return new IngressTLSBuilder()
+ .withHosts(gerritNetwork.getSpec().getIngress().getHost())
+ .withSecretName(gerritNetwork.getSpec().getIngress().getTls().getSecret())
+ .build();
+ }
+ return null;
+ }
+
+ private IngressRule getIngressRule(GerritNetwork gerritNetwork) {
+ List<HTTPIngressPath> ingressPaths = new ArrayList<>();
+ if (gerritNetwork.hasReceiver()) {
+ ingressPaths.addAll(getReceiverIngressPaths(gerritNetwork));
+ }
+ if (gerritNetwork.hasGerrits()) {
+ ingressPaths.addAll(getGerritHTTPIngressPaths(gerritNetwork));
+ }
+
+ if (ingressPaths.isEmpty()) {
+ throw new IllegalStateException(
+ "Failed to create Ingress: No Receiver or Gerrit in GerritCluster.");
+ }
+
+ return new IngressRuleBuilder()
+ .withHost(gerritNetwork.getSpec().getIngress().getHost())
+ .withNewHttp()
+ .withPaths(ingressPaths)
+ .endHttp()
+ .build();
+ }
+
+ private List<HTTPIngressPath> getGerritHTTPIngressPaths(GerritNetwork gerritNetwork) {
+ ServiceBackendPort port =
+ new ServiceBackendPortBuilder().withName(GerritService.HTTP_PORT_NAME).build();
+
+ List<HTTPIngressPath> paths = new ArrayList<>();
+ // Order matters, since routing rules will be applied in order!
+ if (!gerritNetwork.hasPrimaryGerrit() && gerritNetwork.hasGerritReplica()) {
+ paths.add(
+ new HTTPIngressPathBuilder()
+ .withPathType("Prefix")
+ .withPath("/")
+ .withNewBackend()
+ .withNewService()
+ .withName(GerritService.getName(gerritNetwork.getSpec().getGerritReplica().getName()))
+ .withPort(port)
+ .endService()
+ .endBackend()
+ .build());
+ return paths;
+ }
+ if (gerritNetwork.hasGerritReplica()) {
+ paths.add(
+ new HTTPIngressPathBuilder()
+ .withPathType("Prefix")
+ .withPath(UPLOAD_PACK_URL_PATTERN)
+ .withNewBackend()
+ .withNewService()
+ .withName(GerritService.getName(gerritNetwork.getSpec().getGerritReplica().getName()))
+ .withPort(port)
+ .endService()
+ .endBackend()
+ .build());
+ }
+ if (gerritNetwork.hasPrimaryGerrit()) {
+ paths.add(
+ new HTTPIngressPathBuilder()
+ .withPathType("Prefix")
+ .withPath("/")
+ .withNewBackend()
+ .withNewService()
+ .withName(GerritService.getName(gerritNetwork.getSpec().getPrimaryGerrit().getName()))
+ .withPort(port)
+ .endService()
+ .endBackend()
+ .build());
+ }
+ return paths;
+ }
+
+ private List<HTTPIngressPath> getReceiverIngressPaths(GerritNetwork gerritNetwork) {
+ String svcName = ReceiverService.getName(gerritNetwork.getSpec().getReceiver().getName());
+ List<HTTPIngressPath> paths = new ArrayList<>();
+ ServiceBackendPort port =
+ new ServiceBackendPortBuilder().withName(ReceiverService.HTTP_PORT_NAME).build();
+
+ for (String path : List.of("/a/projects", RECEIVE_PACK_URL_PATTERN)) {
+ paths.add(
+ new HTTPIngressPathBuilder()
+ .withPathType("Prefix")
+ .withPath(path)
+ .withNewBackend()
+ .withNewService()
+ .withName(svcName)
+ .withPort(port)
+ .endService()
+ .endBackend()
+ .build());
+ }
+ return paths;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/GerritIstioReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/GerritIstioReconciler.java
new file mode 100644
index 0000000..4d12941
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/GerritIstioReconciler.java
@@ -0,0 +1,89 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.istio;
+
+import static com.google.gerrit.k8s.operator.network.istio.GerritIstioReconciler.ISTIO_DESTINATION_RULE_EVENT_SOURCE;
+import static com.google.gerrit.k8s.operator.network.istio.GerritIstioReconciler.ISTIO_VIRTUAL_SERVICE_EVENT_SOURCE;
+
+import com.google.gerrit.k8s.operator.network.GerritClusterIngressCondition;
+import com.google.gerrit.k8s.operator.network.istio.dependent.GerritClusterIstioGateway;
+import com.google.gerrit.k8s.operator.network.istio.dependent.GerritIstioCondition;
+import com.google.gerrit.k8s.operator.network.istio.dependent.GerritIstioDestinationRule;
+import com.google.gerrit.k8s.operator.network.istio.dependent.GerritIstioVirtualService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.inject.Singleton;
+import io.fabric8.istio.api.networking.v1beta1.DestinationRule;
+import io.fabric8.istio.api.networking.v1beta1.VirtualService;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Reconciler for GerritNetwork resources using Istio. Manages a Gateway plus per-Gerrit
+ * DestinationRules and a VirtualService as dependents; the destination-rule and virtual-service
+ * dependents use the named informer event sources registered in {@link #prepareEventSources}.
+ */
+@Singleton
+@ControllerConfiguration(
+    dependents = {
+      @Dependent(
+          name = "gerrit-destination-rules",
+          type = GerritIstioDestinationRule.class,
+          reconcilePrecondition = GerritIstioCondition.class,
+          useEventSourceWithName = ISTIO_DESTINATION_RULE_EVENT_SOURCE),
+      @Dependent(
+          name = "gerrit-istio-gateway",
+          type = GerritClusterIstioGateway.class,
+          reconcilePrecondition = GerritClusterIngressCondition.class),
+      @Dependent(
+          name = "gerrit-istio-virtual-service",
+          type = GerritIstioVirtualService.class,
+          reconcilePrecondition = GerritIstioCondition.class,
+          dependsOn = {"gerrit-istio-gateway"},
+          useEventSourceWithName = ISTIO_VIRTUAL_SERVICE_EVENT_SOURCE),
+    })
+public class GerritIstioReconciler
+    implements Reconciler<GerritNetwork>, EventSourceInitializer<GerritNetwork> {
+  // Names under which the shared informer event sources are registered and looked up by the
+  // @Dependent declarations above (useEventSourceWithName).
+  public static final String ISTIO_DESTINATION_RULE_EVENT_SOURCE =
+      "gerrit-cluster-istio-destination-rule";
+  public static final String ISTIO_VIRTUAL_SERVICE_EVENT_SOURCE =
+      "gerrit-cluster-istio-virtual-service";
+
+  /** Registers informer event sources for DestinationRule and VirtualService resources. */
+  @Override
+  public Map<String, EventSource> prepareEventSources(EventSourceContext<GerritNetwork> context) {
+    InformerEventSource<DestinationRule, GerritNetwork> gerritIstioDestinationRuleEventSource =
+        new InformerEventSource<>(
+            InformerConfiguration.from(DestinationRule.class, context).build(), context);
+
+    InformerEventSource<VirtualService, GerritNetwork> virtualServiceEventSource =
+        new InformerEventSource<>(
+            InformerConfiguration.from(VirtualService.class, context).build(), context);
+
+    Map<String, EventSource> eventSources = new HashMap<>();
+    eventSources.put(ISTIO_DESTINATION_RULE_EVENT_SOURCE, gerritIstioDestinationRuleEventSource);
+    eventSources.put(ISTIO_VIRTUAL_SERVICE_EVENT_SOURCE, virtualServiceEventSource);
+    return eventSources;
+  }
+
+  @Override
+  public UpdateControl<GerritNetwork> reconcile(
+      GerritNetwork resource, Context<GerritNetwork> context) throws Exception {
+    // All resources are managed as dependents; nothing to patch on the primary resource.
+    return UpdateControl.noUpdate();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritClusterIstioGateway.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritClusterIstioGateway.java
new file mode 100644
index 0000000..099a4ed
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritClusterIstioGateway.java
@@ -0,0 +1,115 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.istio.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.istio.api.networking.v1beta1.Gateway;
+import io.fabric8.istio.api.networking.v1beta1.GatewayBuilder;
+import io.fabric8.istio.api.networking.v1beta1.Server;
+import io.fabric8.istio.api.networking.v1beta1.ServerBuilder;
+import io.fabric8.istio.api.networking.v1beta1.ServerTLSSettingsTLSmode;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Dependent resource producing the Istio Gateway for a GerritNetwork: an HTTP server (with HTTPS
+ * redirect when TLS is enabled), an optional HTTPS server, and optional SSH TCP servers for the
+ * primary Gerrit and/or the Gerrit replica.
+ */
+public class GerritClusterIstioGateway
+    extends CRUDKubernetesDependentResource<Gateway, GerritNetwork> {
+  public static final String NAME = "gerrit-istio-gateway";
+
+  public GerritClusterIstioGateway() {
+    super(Gateway.class);
+  }
+
+  @Override
+  public Gateway desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+    return new GatewayBuilder()
+        .withNewMetadata()
+        .withName(NAME)
+        .withNamespace(gerritNetwork.getMetadata().getNamespace())
+        .withLabels(
+            GerritCluster.getLabels(
+                gerritNetwork.getMetadata().getName(), NAME, this.getClass().getSimpleName()))
+        .endMetadata()
+        .withNewSpec()
+        // Binds to the default Istio ingress gateway deployment.
+        .withSelector(Map.of("istio", "ingressgateway"))
+        .withServers(configureServers(gerritNetwork))
+        .endSpec()
+        .build();
+  }
+
+  /** Builds the server list for the gateway based on the TLS and SSH settings in the spec. */
+  private List<Server> configureServers(GerritNetwork gerritNetwork) {
+    List<Server> servers = new ArrayList<>();
+    String gerritClusterHost = gerritNetwork.getSpec().getIngress().getHost();
+
+    // Plain HTTP on port 80; redirects to HTTPS when TLS is enabled.
+    servers.add(
+        new ServerBuilder()
+            .withNewPort()
+            .withName("http")
+            .withNumber(80)
+            .withProtocol("HTTP")
+            .endPort()
+            .withHosts(gerritClusterHost)
+            .withNewTls()
+            .withHttpsRedirect(gerritNetwork.getSpec().getIngress().getTls().isEnabled())
+            .endTls()
+            .build());
+
+    // HTTPS on port 443, terminating TLS with the secret from the ingress spec.
+    if (gerritNetwork.getSpec().getIngress().getTls().isEnabled()) {
+      servers.add(
+          new ServerBuilder()
+              .withNewPort()
+              .withName("https")
+              .withNumber(443)
+              .withProtocol("HTTPS")
+              .endPort()
+              .withHosts(gerritClusterHost)
+              .withNewTls()
+              .withMode(ServerTLSSettingsTLSmode.SIMPLE)
+              .withCredentialName(gerritNetwork.getSpec().getIngress().getTls().getSecret())
+              .endTls()
+              .build());
+    }
+
+    // Raw TCP servers for SSH, one per configured Gerrit flavor, on its configured port.
+    if (gerritNetwork.getSpec().getIngress().getSsh().isEnabled() && gerritNetwork.hasGerrits()) {
+      if (gerritNetwork.hasPrimaryGerrit()) {
+        servers.add(
+            new ServerBuilder()
+                .withNewPort()
+                .withName("ssh-primary")
+                .withNumber(gerritNetwork.getSpec().getPrimaryGerrit().getSshPort())
+                .withProtocol("TCP")
+                .endPort()
+                .withHosts(gerritClusterHost)
+                .build());
+      }
+      if (gerritNetwork.hasGerritReplica()) {
+        servers.add(
+            new ServerBuilder()
+                .withNewPort()
+                .withName("ssh-replica")
+                .withNumber(gerritNetwork.getSpec().getGerritReplica().getSshPort())
+                .withProtocol("TCP")
+                .endPort()
+                .withHosts(gerritClusterHost)
+                .build());
+      }
+    }
+
+    return servers;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioCondition.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioCondition.java
new file mode 100644
index 0000000..ac6c6c3
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioCondition.java
@@ -0,0 +1,34 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.istio.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.istio.api.networking.v1beta1.VirtualService;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+
+/**
+ * Workflow condition for the Istio VirtualService/DestinationRule dependents: met when the
+ * ingress is enabled and there is at least one routable component (a Gerrit or a Receiver).
+ */
+public class GerritIstioCondition implements Condition<VirtualService, GerritNetwork> {
+
+  @Override
+  public boolean isMet(
+      DependentResource<VirtualService, GerritNetwork> dependentResource,
+      GerritNetwork gerritNetwork,
+      Context<GerritNetwork> context) {
+
+    return gerritNetwork.getSpec().getIngress().isEnabled()
+        && (gerritNetwork.hasGerrits() || gerritNetwork.hasReceiver());
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioDestinationRule.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioDestinationRule.java
new file mode 100644
index 0000000..98f8267
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioDestinationRule.java
@@ -0,0 +1,131 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.istio.dependent;
+
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork.SESSION_COOKIE_NAME;
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork.SESSION_COOKIE_TTL;
+
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.istio.api.networking.v1beta1.DestinationRule;
+import io.fabric8.istio.api.networking.v1beta1.DestinationRuleBuilder;
+import io.fabric8.istio.api.networking.v1beta1.LoadBalancerSettingsSimpleLB;
+import io.fabric8.istio.api.networking.v1beta1.TrafficPolicy;
+import io.fabric8.istio.api.networking.v1beta1.TrafficPolicyBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Deleter;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.GarbageCollected;
+import io.javaoperatorsdk.operator.processing.dependent.BulkDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.Creator;
+import io.javaoperatorsdk.operator.processing.dependent.Updater;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependentResource;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Bulk dependent resource managing one Istio {@link DestinationRule} per Gerrit instance
+ * referenced by a {@link GerritNetwork} (primary and/or replica).
+ *
+ * <p>A primary Gerrit gets consistent-hash load balancing keyed on a session cookie (session
+ * affinity); a replica gets LEAST_CONN.
+ */
+public class GerritIstioDestinationRule
+    extends KubernetesDependentResource<DestinationRule, GerritNetwork>
+    implements Creator<DestinationRule, GerritNetwork>,
+        Updater<DestinationRule, GerritNetwork>,
+        Deleter<GerritNetwork>,
+        BulkDependentResource<DestinationRule, GerritNetwork>,
+        GarbageCollected<GerritNetwork> {
+
+  public GerritIstioDestinationRule() {
+    super(DestinationRule.class);
+  }
+
+  /**
+   * Builds the desired DestinationRule for a single Gerrit instance.
+   *
+   * @param gerritNetwork the owning GerritNetwork; supplies namespace and label metadata
+   * @param gerritName name of the Gerrit instance the rule targets
+   * @param isReplica true for a replica (LEAST_CONN), false for a primary (cookie affinity)
+   */
+  protected DestinationRule desired(
+      GerritNetwork gerritNetwork, String gerritName, boolean isReplica) {
+
+    return new DestinationRuleBuilder()
+        .withNewMetadata()
+        .withName(getName(gerritName))
+        .withNamespace(gerritNetwork.getMetadata().getNamespace())
+        .withLabels(
+            GerritCluster.getLabels(
+                gerritNetwork.getMetadata().getName(),
+                getName(gerritName),
+                this.getClass().getSimpleName()))
+        .endMetadata()
+        .withNewSpec()
+        // The rule targets the in-cluster service hostname of the Gerrit instance.
+        .withHost(GerritService.getHostname(gerritName, gerritNetwork.getMetadata().getNamespace()))
+        .withTrafficPolicy(getTrafficPolicy(isReplica))
+        .endSpec()
+        .build();
+  }
+
+  // Replica: spread load with LEAST_CONN. Primary: pin a client to one pod via a
+  // consistent-hash on the shared session cookie (name/TTL defined on GerritNetwork).
+  private TrafficPolicy getTrafficPolicy(boolean isReplica) {
+    if (isReplica) {
+      return new TrafficPolicyBuilder()
+          .withNewLoadBalancer()
+          .withNewLoadBalancerSettingsSimpleLbPolicy()
+          .withSimple(LoadBalancerSettingsSimpleLB.LEAST_CONN)
+          .endLoadBalancerSettingsSimpleLbPolicy()
+          .endLoadBalancer()
+          .build();
+    }
+    return new TrafficPolicyBuilder()
+        .withNewLoadBalancer()
+        .withNewLoadBalancerSettingsConsistentHashLbPolicy()
+        .withNewConsistentHash()
+        .withNewLoadBalancerSettingsConsistentHashLBHttpCookieKey()
+        .withNewHttpCookie()
+        .withName(SESSION_COOKIE_NAME)
+        .withTtl(SESSION_COOKIE_TTL)
+        .endHttpCookie()
+        .endLoadBalancerSettingsConsistentHashLBHttpCookieKey()
+        .endConsistentHash()
+        .endLoadBalancerSettingsConsistentHashLbPolicy()
+        .endLoadBalancer()
+        .build();
+  }
+
+  /** Returns the DestinationRule name for a Gerrit template (its metadata name). */
+  public static String getName(GerritTemplate gerrit) {
+    return gerrit.getMetadata().getName();
+  }
+
+  /** Returns the DestinationRule name for a Gerrit instance name (currently the name itself). */
+  public static String getName(String gerritName) {
+    return gerritName;
+  }
+
+  /**
+   * Desired DestinationRules keyed by Gerrit instance name: one for the primary (non-replica
+   * policy) and one for the replica (replica policy), each only if present on the network.
+   */
+  @Override
+  public Map<String, DestinationRule> desiredResources(
+      GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+    Map<String, DestinationRule> drs = new HashMap<>();
+    if (gerritNetwork.hasPrimaryGerrit()) {
+      String primaryGerritName = gerritNetwork.getSpec().getPrimaryGerrit().getName();
+      drs.put(primaryGerritName, desired(gerritNetwork, primaryGerritName, false));
+    }
+    if (gerritNetwork.hasGerritReplica()) {
+      String gerritReplicaName = gerritNetwork.getSpec().getGerritReplica().getName();
+      drs.put(gerritReplicaName, desired(gerritNetwork, gerritReplicaName, true));
+    }
+    return drs;
+  }
+
+  /** Indexes the DestinationRules already known to the context by their metadata name. */
+  @Override
+  public Map<String, DestinationRule> getSecondaryResources(
+      GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+    Set<DestinationRule> drs = context.getSecondaryResources(DestinationRule.class);
+    Map<String, DestinationRule> result = new HashMap<>(drs.size());
+    for (DestinationRule dr : drs) {
+      result.put(dr.getMetadata().getName(), dr);
+    }
+    return result;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioVirtualService.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioVirtualService.java
new file mode 100644
index 0000000..f0d683c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritIstioVirtualService.java
@@ -0,0 +1,238 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.istio.dependent;
+
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritService;
+import com.google.gerrit.k8s.operator.receiver.dependent.ReceiverService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.NetworkMember;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.NetworkMemberWithSsh;
+import io.fabric8.istio.api.networking.v1beta1.HTTPMatchRequest;
+import io.fabric8.istio.api.networking.v1beta1.HTTPMatchRequestBuilder;
+import io.fabric8.istio.api.networking.v1beta1.HTTPRoute;
+import io.fabric8.istio.api.networking.v1beta1.HTTPRouteBuilder;
+import io.fabric8.istio.api.networking.v1beta1.HTTPRouteDestination;
+import io.fabric8.istio.api.networking.v1beta1.HTTPRouteDestinationBuilder;
+import io.fabric8.istio.api.networking.v1beta1.L4MatchAttributesBuilder;
+import io.fabric8.istio.api.networking.v1beta1.RouteDestination;
+import io.fabric8.istio.api.networking.v1beta1.RouteDestinationBuilder;
+import io.fabric8.istio.api.networking.v1beta1.StringMatchBuilder;
+import io.fabric8.istio.api.networking.v1beta1.TCPRoute;
+import io.fabric8.istio.api.networking.v1beta1.TCPRouteBuilder;
+import io.fabric8.istio.api.networking.v1beta1.VirtualService;
+import io.fabric8.istio.api.networking.v1beta1.VirtualServiceBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+@KubernetesDependent
+public class GerritIstioVirtualService
+ extends CRUDKubernetesDependentResource<VirtualService, GerritNetwork> {
+ private static final String INFO_REF_URL_PATTERN = "^/(.*)/info/refs$";
+ private static final String UPLOAD_PACK_URL_PATTERN = "^/(.*)/git-upload-pack$";
+ private static final String RECEIVE_PACK_URL_PATTERN = "^/(.*)/git-receive-pack$";
+ public static final String NAME_SUFFIX = "gerrit-http-virtual-service";
+
+ public GerritIstioVirtualService() {
+ super(VirtualService.class);
+ }
+
+ @Override
+ protected VirtualService desired(GerritNetwork gerritNetwork, Context<GerritNetwork> context) {
+ String gerritClusterHost = gerritNetwork.getSpec().getIngress().getHost();
+ String namespace = gerritNetwork.getMetadata().getNamespace();
+
+ return new VirtualServiceBuilder()
+ .withNewMetadata()
+ .withName(gerritNetwork.getDependentResourceName(NAME_SUFFIX))
+ .withNamespace(namespace)
+ .withLabels(
+ GerritCluster.getLabels(
+ gerritNetwork.getMetadata().getName(),
+ gerritNetwork.getDependentResourceName(NAME_SUFFIX),
+ this.getClass().getSimpleName()))
+ .endMetadata()
+ .withNewSpec()
+ .withHosts(gerritClusterHost)
+ .withGateways(namespace + "/" + GerritClusterIstioGateway.NAME)
+ .withHttp(getHTTPRoutes(gerritNetwork))
+ .withTcp(getTCPRoutes(gerritNetwork))
+ .endSpec()
+ .build();
+ }
+
+ private List<HTTPRoute> getHTTPRoutes(GerritNetwork gerritNetwork) {
+ String namespace = gerritNetwork.getMetadata().getNamespace();
+ List<HTTPRoute> routes = new ArrayList<>();
+ if (gerritNetwork.hasReceiver()) {
+ routes.add(
+ new HTTPRouteBuilder()
+ .withName("receiver-" + gerritNetwork.getSpec().getReceiver().getName())
+ .withMatch(getReceiverMatches())
+ .withRoute(
+ getReceiverHTTPDestination(gerritNetwork.getSpec().getReceiver(), namespace))
+ .build());
+ }
+ if (gerritNetwork.hasGerritReplica()) {
+ HTTPRouteBuilder routeBuilder =
+ new HTTPRouteBuilder()
+ .withName("gerrit-replica-" + gerritNetwork.getSpec().getGerritReplica().getName());
+ if (gerritNetwork.hasPrimaryGerrit()) {
+ routeBuilder = routeBuilder.withMatch(getGerritReplicaMatches());
+ }
+ routes.add(
+ routeBuilder
+ .withRoute(
+ getGerritHTTPDestinations(gerritNetwork.getSpec().getGerritReplica(), namespace))
+ .build());
+ }
+ if (gerritNetwork.hasPrimaryGerrit()) {
+ routes.add(
+ new HTTPRouteBuilder()
+ .withName("gerrit-primary-" + gerritNetwork.getSpec().getPrimaryGerrit().getName())
+ .withRoute(
+ getGerritHTTPDestinations(gerritNetwork.getSpec().getPrimaryGerrit(), namespace))
+ .build());
+ }
+
+ return routes;
+ }
+
+ private HTTPRouteDestination getGerritHTTPDestinations(
+ NetworkMemberWithSsh networkMember, String namespace) {
+ return new HTTPRouteDestinationBuilder()
+ .withNewDestination()
+ .withHost(GerritService.getHostname(networkMember.getName(), namespace))
+ .withNewPort()
+ .withNumber(networkMember.getHttpPort())
+ .endPort()
+ .endDestination()
+ .build();
+ }
+
+ private List<HTTPMatchRequest> getGerritReplicaMatches() {
+ List<HTTPMatchRequest> matches = new ArrayList<>();
+ matches.add(
+ new HTTPMatchRequestBuilder()
+ .withNewUri()
+ .withNewStringMatchRegexType()
+ .withRegex(INFO_REF_URL_PATTERN)
+ .endStringMatchRegexType()
+ .endUri()
+ .withQueryParams(
+ Map.of(
+ "service",
+ new StringMatchBuilder()
+ .withNewStringMatchExactType("git-upload-pack")
+ .build()))
+ .withIgnoreUriCase()
+ .withNewMethod()
+ .withNewStringMatchExactType()
+ .withExact("GET")
+ .endStringMatchExactType()
+ .endMethod()
+ .build());
+ matches.add(
+ new HTTPMatchRequestBuilder()
+ .withNewUri()
+ .withNewStringMatchRegexType()
+ .withRegex(UPLOAD_PACK_URL_PATTERN)
+ .endStringMatchRegexType()
+ .endUri()
+ .withIgnoreUriCase()
+ .withNewMethod()
+ .withNewStringMatchExactType()
+ .withExact("POST")
+ .endStringMatchExactType()
+ .endMethod()
+ .build());
+ return matches;
+ }
+
+ private HTTPRouteDestination getReceiverHTTPDestination(
+ NetworkMember receiver, String namespace) {
+ return new HTTPRouteDestinationBuilder()
+ .withNewDestination()
+ .withHost(ReceiverService.getHostname(receiver.getName(), namespace))
+ .withNewPort()
+ .withNumber(receiver.getHttpPort())
+ .endPort()
+ .endDestination()
+ .build();
+ }
+
+ private List<HTTPMatchRequest> getReceiverMatches() {
+ List<HTTPMatchRequest> matches = new ArrayList<>();
+ matches.add(
+ new HTTPMatchRequestBuilder()
+ .withUri(new StringMatchBuilder().withNewStringMatchPrefixType("/a/projects/").build())
+ .build());
+ matches.add(
+ new HTTPMatchRequestBuilder()
+ .withNewUri()
+ .withNewStringMatchRegexType()
+ .withRegex(RECEIVE_PACK_URL_PATTERN)
+ .endStringMatchRegexType()
+ .endUri()
+ .build());
+ matches.add(
+ new HTTPMatchRequestBuilder()
+ .withNewUri()
+ .withNewStringMatchRegexType()
+ .withRegex(INFO_REF_URL_PATTERN)
+ .endStringMatchRegexType()
+ .endUri()
+ .withQueryParams(
+ Map.of(
+ "service",
+ new StringMatchBuilder()
+ .withNewStringMatchExactType("git-receive-pack")
+ .build()))
+ .build());
+ return matches;
+ }
+
+ private List<TCPRoute> getTCPRoutes(GerritNetwork gerritNetwork) {
+ List<TCPRoute> routes = new ArrayList<>();
+ for (NetworkMemberWithSsh gerrit : gerritNetwork.getSpec().getGerrits()) {
+ if (gerritNetwork.getSpec().getIngress().getSsh().isEnabled() && gerrit.getSshPort() > 0) {
+ routes.add(
+ new TCPRouteBuilder()
+ .withMatch(
+ List.of(new L4MatchAttributesBuilder().withPort(gerrit.getSshPort()).build()))
+ .withRoute(
+ getGerritTCPDestination(gerrit, gerritNetwork.getMetadata().getNamespace()))
+ .build());
+ }
+ }
+ return routes;
+ }
+
+ private RouteDestination getGerritTCPDestination(
+ NetworkMemberWithSsh networkMember, String namespace) {
+ return new RouteDestinationBuilder()
+ .withNewDestination()
+ .withHost(GerritService.getHostname(networkMember.getName(), namespace))
+ .withNewPort()
+ .withNumber(networkMember.getSshPort())
+ .endPort()
+ .endDestination()
+ .build();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/none/GerritNoIngressReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/none/GerritNoIngressReconciler.java
new file mode 100644
index 0000000..2179260
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/network/none/GerritNoIngressReconciler.java
@@ -0,0 +1,33 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.none;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.inject.Singleton;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+
+/**
+ * No-op reconciler for {@link GerritNetwork}: every reconcile returns {@code noUpdate()} and
+ * creates no dependent resources. Given the {@code network.none} package, this is presumably the
+ * binding used when no ingress provider is configured — confirm against the operator's module
+ * wiring (NOTE(review)).
+ */
+@Singleton
+@ControllerConfiguration
+public class GerritNoIngressReconciler implements Reconciler<GerritNetwork> {
+
+  @Override
+  public UpdateControl<GerritNetwork> reconcile(
+      GerritNetwork resource, Context<GerritNetwork> context) throws Exception {
+    return UpdateControl.noUpdate();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/ReceiverReconciler.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/ReceiverReconciler.java
new file mode 100644
index 0000000..b48a05f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/ReceiverReconciler.java
@@ -0,0 +1,149 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.receiver.dependent.ReceiverDeployment;
+import com.google.gerrit.k8s.operator.receiver.dependent.ReceiverService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverStatus;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import io.fabric8.kubernetes.api.model.Secret;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.SecondaryToPrimaryMapper;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Reconciler for {@link Receiver}: manages the receiver Deployment and Service as dependents,
+ * watches the referenced credential Secret, and triggers a rolling restart of the Deployment when
+ * that Secret's resource version changes.
+ */
+@Singleton
+@ControllerConfiguration(
+    dependents = {
+      @Dependent(name = "receiver-deployment", type = ReceiverDeployment.class),
+      @Dependent(
+          name = "receiver-service",
+          type = ReceiverService.class,
+          dependsOn = {"receiver-deployment"})
+    })
+public class ReceiverReconciler implements Reconciler<Receiver>, EventSourceInitializer<Receiver> {
+  private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+  private static final String SECRET_EVENT_SOURCE_NAME = "secret-event-source";
+  private final KubernetesClient client;
+
+  @Inject
+  public ReceiverReconciler(KubernetesClient client) {
+    this.client = client;
+  }
+
+  /**
+   * Registers an informer on Secrets so that a change to a credential Secret re-triggers
+   * reconciliation of every Receiver referencing it by name.
+   */
+  @Override
+  public Map<String, EventSource> prepareEventSources(EventSourceContext<Receiver> context) {
+    final SecondaryToPrimaryMapper<Secret> secretMapper =
+        (Secret secret) ->
+            context
+                .getPrimaryCache()
+                .list(
+                    receiver ->
+                        receiver
+                            .getSpec()
+                            .getCredentialSecretRef()
+                            .equals(secret.getMetadata().getName()))
+                .map(ResourceID::fromResource)
+                .collect(Collectors.toSet());
+
+    InformerEventSource<Secret, Receiver> secretEventSource =
+        new InformerEventSource<>(
+            InformerConfiguration.from(Secret.class, context)
+                .withSecondaryToPrimaryMapper(secretMapper)
+                .build(),
+            context);
+
+    Map<String, EventSource> eventSources = new HashMap<>();
+    eventSources.put(SECRET_EVENT_SOURCE_NAME, secretEventSource);
+    return eventSources;
+  }
+
+  /**
+   * Restarts the Deployment when the credential Secret changed since the last applied version
+   * (only once a status exists), then records the current Secret version in the status.
+   */
+  @Override
+  public UpdateControl<Receiver> reconcile(Receiver receiver, Context<Receiver> context)
+      throws Exception {
+    if (receiver.getStatus() != null && isReceiverRestartRequired(receiver, context)) {
+      restartReceiverDeployment(receiver);
+    }
+
+    return UpdateControl.patchStatus(updateStatus(receiver, context));
+  }
+
+  /** Issues a rolling restart of the receiver Deployment (named after the Receiver). */
+  void restartReceiverDeployment(Receiver receiver) {
+    logger.atInfo().log(
+        "Restarting Receiver %s due to configuration change.", receiver.getMetadata().getName());
+    client
+        .apps()
+        .deployments()
+        .inNamespace(receiver.getMetadata().getNamespace())
+        .withName(receiver.getMetadata().getName())
+        .rolling()
+        .restart();
+  }
+
+  /** Copies the current credential Secret's resource version into the Receiver status. */
+  private Receiver updateStatus(Receiver receiver, Context<Receiver> context) {
+    ReceiverStatus status = receiver.getStatus();
+    if (status == null) {
+      status = new ReceiverStatus();
+    }
+
+    Secret sec =
+        client
+            .secrets()
+            .inNamespace(receiver.getMetadata().getNamespace())
+            .withName(receiver.getSpec().getCredentialSecretRef())
+            .get();
+
+    if (sec != null) {
+      status.setAppliedCredentialSecretVersion(sec.getMetadata().getResourceVersion());
+    }
+
+    receiver.setStatus(status);
+    return receiver;
+  }
+
+  /**
+   * Returns true when the referenced credential Secret's resource version differs from the one
+   * recorded in the Receiver status. Returns false when the Secret does not exist — the previous
+   * implementation dereferenced the lookup result unconditionally and threw a
+   * NullPointerException whenever the Secret had been deleted, failing the whole reconcile.
+   */
+  private boolean isReceiverRestartRequired(Receiver receiver, Context<Receiver> context) {
+    Secret sec =
+        client
+            .secrets()
+            .inNamespace(receiver.getMetadata().getNamespace())
+            .withName(receiver.getSpec().getCredentialSecretRef())
+            .get();
+    if (sec == null) {
+      // Missing secret: nothing to compare against, so no restart. updateStatus() applies the
+      // same null-guard.
+      return false;
+    }
+    String secVersion = sec.getMetadata().getResourceVersion();
+    String appliedSecVersion = receiver.getStatus().getAppliedCredentialSecretVersion();
+    if (!secVersion.equals(appliedSecVersion)) {
+      logger.atFine().log(
+          "Looking up Secret: %s; Installed secret resource version: %s; Resource version known to the Receiver: %s",
+          receiver.getSpec().getCredentialSecretRef(), secVersion, appliedSecVersion);
+      return true;
+    }
+    return false;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverDeployment.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverDeployment.java
new file mode 100644
index 0000000..16c0072
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverDeployment.java
@@ -0,0 +1,188 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver.dependent;
+
+import com.google.gerrit.k8s.operator.receiver.ReceiverReconciler;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.NfsWorkaroundConfig;
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.ContainerPort;
+import io.fabric8.kubernetes.api.model.Volume;
+import io.fabric8.kubernetes.api.model.VolumeBuilder;
+import io.fabric8.kubernetes.api.model.VolumeMount;
+import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
+import io.fabric8.kubernetes.api.model.apps.Deployment;
+import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Dependent resource managing the Deployment that runs the apache-git-http-backend container
+ * serving push traffic for a {@link Receiver}.
+ */
+@KubernetesDependent
+public class ReceiverDeployment extends CRUDKubernetesDependentResource<Deployment, Receiver> {
+  // Container port the apache-git-http-backend listens on; exposed under the name "http".
+  public static final int HTTP_PORT = 80;
+
+  public ReceiverDeployment() {
+    super(Deployment.class);
+  }
+
+  /**
+   * Builds the desired Deployment: rolling-update strategy and scheduling constraints taken from
+   * the Receiver spec, an optional NFS chown init container, the git-http-backend container, and
+   * the shared/credential/idmapd volumes.
+   */
+  @Override
+  protected Deployment desired(Receiver receiver, Context<Receiver> context) {
+    DeploymentBuilder deploymentBuilder = new DeploymentBuilder();
+
+    List<Container> initContainers = new ArrayList<>();
+
+    NfsWorkaroundConfig nfsWorkaround =
+        receiver.getSpec().getStorage().getStorageClasses().getNfsWorkaround();
+    if (nfsWorkaround.isEnabled() && nfsWorkaround.isChownOnStartup()) {
+      // Reuse the local nfsWorkaround instead of re-traversing the full getter chain
+      // (receiver.getSpec()...getNfsWorkaround()) — it is the same object.
+      initContainers.add(
+          GerritCluster.createNfsInitContainer(
+              nfsWorkaround.getIdmapdConfig() != null,
+              receiver.getSpec().getContainerImages()));
+    }
+
+    deploymentBuilder
+        .withApiVersion("apps/v1")
+        .withNewMetadata()
+        .withName(receiver.getMetadata().getName())
+        .withNamespace(receiver.getMetadata().getNamespace())
+        .withLabels(getLabels(receiver))
+        .endMetadata()
+        .withNewSpec()
+        .withReplicas(receiver.getSpec().getReplicas())
+        .withNewStrategy()
+        .withNewRollingUpdate()
+        .withMaxSurge(receiver.getSpec().getMaxSurge())
+        .withMaxUnavailable(receiver.getSpec().getMaxUnavailable())
+        .endRollingUpdate()
+        .endStrategy()
+        .withNewSelector()
+        .withMatchLabels(getSelectorLabels(receiver))
+        .endSelector()
+        .withNewTemplate()
+        .withNewMetadata()
+        .withLabels(getLabels(receiver))
+        .endMetadata()
+        .withNewSpec()
+        .withTolerations(receiver.getSpec().getTolerations())
+        .withTopologySpreadConstraints(receiver.getSpec().getTopologySpreadConstraints())
+        .withAffinity(receiver.getSpec().getAffinity())
+        .withPriorityClassName(receiver.getSpec().getPriorityClassName())
+        .addAllToImagePullSecrets(receiver.getSpec().getContainerImages().getImagePullSecrets())
+        .withNewSecurityContext()
+        .withFsGroup(100L)
+        .endSecurityContext()
+        .addAllToInitContainers(initContainers)
+        .addNewContainer()
+        .withName("apache-git-http-backend")
+        .withImagePullPolicy(receiver.getSpec().getContainerImages().getImagePullPolicy())
+        .withImage(
+            receiver
+                .getSpec()
+                .getContainerImages()
+                .getGerritImages()
+                .getFullImageName("apache-git-http-backend"))
+        .withEnv(GerritCluster.getPodNameEnvVar())
+        .withPorts(getContainerPorts(receiver))
+        .withResources(receiver.getSpec().getResources())
+        .withReadinessProbe(receiver.getSpec().getReadinessProbe())
+        .withLivenessProbe(receiver.getSpec().getLivenessProbe())
+        .addAllToVolumeMounts(getVolumeMounts(receiver, false))
+        .endContainer()
+        .addAllToVolumes(getVolumes(receiver))
+        .endSpec()
+        .endTemplate()
+        .endSpec();
+
+    return deploymentBuilder.build();
+  }
+
+  /** Component label value, unique per Receiver instance. */
+  private static String getComponentName(Receiver receiver) {
+    return String.format("receiver-deployment-%s", receiver.getMetadata().getName());
+  }
+
+  /** Immutable selector labels shared between the Deployment selector and the pod template. */
+  public static Map<String, String> getSelectorLabels(Receiver receiver) {
+    return GerritCluster.getSelectorLabels(
+        receiver.getMetadata().getName(), getComponentName(receiver));
+  }
+
+  /** Full label set applied to the Deployment and its pods. */
+  public static Map<String, String> getLabels(Receiver receiver) {
+    return GerritCluster.getLabels(
+        receiver.getMetadata().getName(),
+        getComponentName(receiver),
+        ReceiverReconciler.class.getSimpleName());
+  }
+
+  /** Pod volumes: shared git storage, the credential (.htpasswd) secret, optional idmapd config. */
+  private Set<Volume> getVolumes(Receiver receiver) {
+    Set<Volume> volumes = new HashSet<>();
+    volumes.add(
+        GerritCluster.getSharedVolume(
+            receiver.getSpec().getStorage().getSharedStorage().getExternalPVC()));
+
+    volumes.add(
+        new VolumeBuilder()
+            .withName(receiver.getSpec().getCredentialSecretRef())
+            .withNewSecret()
+            .withSecretName(receiver.getSpec().getCredentialSecretRef())
+            .endSecret()
+            .build());
+
+    NfsWorkaroundConfig nfsWorkaround =
+        receiver.getSpec().getStorage().getStorageClasses().getNfsWorkaround();
+    if (nfsWorkaround.isEnabled() && nfsWorkaround.getIdmapdConfig() != null) {
+      volumes.add(GerritCluster.getNfsImapdConfigVolume());
+    }
+
+    return volumes;
+  }
+
+  /**
+   * Volume mounts for the git-http-backend container: git repositories, apache logs, the
+   * .htpasswd credential file, and optionally the idmapd config.
+   *
+   * @param isInitContainer currently unused; kept for signature compatibility with callers
+   */
+  private Set<VolumeMount> getVolumeMounts(Receiver receiver, boolean isInitContainer) {
+    Set<VolumeMount> volumeMounts = new HashSet<>();
+    volumeMounts.add(GerritCluster.getGitRepositoriesVolumeMount("/var/gerrit/git"));
+    volumeMounts.add(GerritCluster.getLogsVolumeMount("/var/log/apache2"));
+
+    volumeMounts.add(
+        new VolumeMountBuilder()
+            .withName(receiver.getSpec().getCredentialSecretRef())
+            .withMountPath("/var/apache/credentials/.htpasswd")
+            .withSubPath(".htpasswd")
+            .build());
+
+    NfsWorkaroundConfig nfsWorkaround =
+        receiver.getSpec().getStorage().getStorageClasses().getNfsWorkaround();
+    if (nfsWorkaround.isEnabled() && nfsWorkaround.getIdmapdConfig() != null) {
+      volumeMounts.add(GerritCluster.getNfsImapdConfigVolumeMount());
+    }
+
+    return volumeMounts;
+  }
+
+  /** Single "http" container port on {@link #HTTP_PORT}. */
+  private List<ContainerPort> getContainerPorts(Receiver receiver) {
+    List<ContainerPort> containerPorts = new ArrayList<>();
+    containerPorts.add(new ContainerPort(HTTP_PORT, null, null, "http", null));
+    return containerPorts;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverSecret.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverSecret.java
new file mode 100644
index 0000000..ca4700a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverSecret.java
@@ -0,0 +1,45 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver.dependent;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import io.fabric8.kubernetes.api.model.Secret;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.SecondaryToPrimaryMapper;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Dependent resource tracking the credential Secret referenced by a {@link Receiver}. Maps a
+ * Secret event back to every Receiver in the same namespace whose {@code credentialSecretRef}
+ * names that Secret. Note: the lookup lists Receivers via the live API client rather than an
+ * informer cache.
+ */
+@KubernetesDependent
+public class ReceiverSecret extends KubernetesDependentResource<Secret, Receiver>
+    implements SecondaryToPrimaryMapper<Secret> {
+  public ReceiverSecret() {
+    super(Secret.class);
+  }
+
+  @Override
+  public Set<ResourceID> toPrimaryResourceIDs(Secret secret) {
+    // Same-namespace Receivers whose spec references this secret by name.
+    return client
+        .resources(Receiver.class)
+        .inNamespace(secret.getMetadata().getNamespace())
+        .list()
+        .getItems()
+        .stream()
+        .filter(g -> g.getSpec().getCredentialSecretRef().equals(secret.getMetadata().getName()))
+        .map(g -> ResourceID.fromResource(g))
+        .collect(Collectors.toSet());
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverService.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverService.java
new file mode 100644
index 0000000..3ccb644
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverService.java
@@ -0,0 +1,91 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver.dependent;
+
+import static com.google.gerrit.k8s.operator.receiver.dependent.ReceiverDeployment.HTTP_PORT;
+
+import com.google.gerrit.k8s.operator.receiver.ReceiverReconciler;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.fabric8.kubernetes.api.model.ServicePort;
+import io.fabric8.kubernetes.api.model.ServicePortBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+@KubernetesDependent
+public class ReceiverService extends CRUDKubernetesDependentResource<Service, Receiver> {
+ public static final String HTTP_PORT_NAME = "http";
+
+ public ReceiverService() {
+ super(Service.class);
+ }
+
+ @Override
+ protected Service desired(Receiver receiver, Context<Receiver> context) {
+ return new ServiceBuilder()
+ .withApiVersion("v1")
+ .withNewMetadata()
+ .withName(getName(receiver))
+ .withNamespace(receiver.getMetadata().getNamespace())
+ .withLabels(getLabels(receiver))
+ .endMetadata()
+ .withNewSpec()
+ .withType(receiver.getSpec().getService().getType())
+ .withPorts(getServicePorts(receiver))
+ .withSelector(ReceiverDeployment.getSelectorLabels(receiver))
+ .endSpec()
+ .build();
+ }
+
+ public static String getName(Receiver receiver) {
+ return receiver.getMetadata().getName();
+ }
+
+ public static String getName(String receiverName) {
+ return receiverName;
+ }
+
+ public static Map<String, String> getLabels(Receiver receiver) {
+ return GerritCluster.getLabels(
+ receiver.getMetadata().getName(),
+ "receiver-service",
+ ReceiverReconciler.class.getSimpleName());
+ }
+
+ public static String getHostname(Receiver receiver) {
+ return getHostname(receiver.getMetadata().getName(), receiver.getMetadata().getNamespace());
+ }
+
+ public static String getHostname(String receiverName, String namespace) {
+ return String.format("%s.%s.svc.cluster.local", getName(receiverName), namespace);
+ }
+
+ private static List<ServicePort> getServicePorts(Receiver receiver) {
+ List<ServicePort> ports = new ArrayList<>();
+ ports.add(
+ new ServicePortBuilder()
+ .withName(HTTP_PORT_NAME)
+ .withPort(receiver.getSpec().getService().getHttpPort())
+ .withNewTargetPort(HTTP_PORT)
+ .build());
+ return ports;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/AbstractKeyStoreProvider.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/AbstractKeyStoreProvider.java
new file mode 100644
index 0000000..3b71e89
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/AbstractKeyStoreProvider.java
@@ -0,0 +1,52 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import java.io.IOException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.CertificateEncodingException;
+import java.security.cert.CertificateException;
+import java.util.Base64;
+
+public abstract class AbstractKeyStoreProvider implements KeyStoreProvider {
+ private static final String ALIAS = "operator";
+ private static final String CERT_PREFIX = "-----BEGIN CERTIFICATE-----";
+ private static final String CERT_SUFFIX = "-----END CERTIFICATE-----";
+
+ final String getAlias() {
+ return ALIAS;
+ }
+
+ @Override
+ public final String getCertificate()
+ throws CertificateEncodingException, KeyStoreException, NoSuchAlgorithmException,
+ CertificateException, IOException {
+ StringBuilder cert = new StringBuilder();
+ cert.append(CERT_PREFIX);
+ cert.append("\n");
+ cert.append(
+ Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(getKeyStore().getCertificate(getAlias()).getEncoded()));
+ cert.append("\n");
+ cert.append(CERT_SUFFIX);
+ return cert.toString();
+ }
+
+ private final KeyStore getKeyStore()
+ throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException {
+ return KeyStore.getInstance(getKeyStorePath().toFile(), getKeyStorePassword().toCharArray());
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/AdmissionWebhookServlet.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/AdmissionWebhookServlet.java
new file mode 100644
index 0000000..a886988
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/AdmissionWebhookServlet.java
@@ -0,0 +1,27 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import jakarta.servlet.http.HttpServlet;
+
+public abstract class AdmissionWebhookServlet extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+
+ public abstract String getName();
+
+ public abstract String getVersion();
+
+ public abstract String getURI();
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/FileSystemKeyStoreProvider.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/FileSystemKeyStoreProvider.java
new file mode 100644
index 0000000..a56da7f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/FileSystemKeyStoreProvider.java
@@ -0,0 +1,36 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import com.google.inject.Singleton;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+@Singleton
+public class FileSystemKeyStoreProvider extends AbstractKeyStoreProvider {
+ static final String KEYSTORE_PATH = "/operator/keystore.jks";
+ static final String KEYSTORE_PWD_FILE = "/operator/keystore.password";
+
+ @Override
+ public Path getKeyStorePath() {
+ return Path.of(KEYSTORE_PATH);
+ }
+
+ @Override
+ public String getKeyStorePassword() throws IOException {
+ return Files.readString(Path.of(KEYSTORE_PWD_FILE));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/GeneratedKeyStoreProvider.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/GeneratedKeyStoreProvider.java
new file mode 100644
index 0000000..d96204d
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/GeneratedKeyStoreProvider.java
@@ -0,0 +1,132 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import static com.google.gerrit.k8s.operator.GerritOperator.SERVICE_NAME;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import com.google.inject.name.Named;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.file.Path;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.Security;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateException;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.Date;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.bouncycastle.asn1.ASN1Encodable;
+import org.bouncycastle.asn1.DERSequence;
+import org.bouncycastle.asn1.x500.X500Name;
+import org.bouncycastle.asn1.x509.Extension;
+import org.bouncycastle.asn1.x509.GeneralName;
+import org.bouncycastle.cert.CertIOException;
+import org.bouncycastle.cert.X509v3CertificateBuilder;
+import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
+import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.bouncycastle.operator.ContentSigner;
+import org.bouncycastle.operator.OperatorCreationException;
+import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
+
+@Singleton
+public class GeneratedKeyStoreProvider extends AbstractKeyStoreProvider {
+ private static final Path KEYSTORE_PATH = Path.of("/tmp/keystore.jks");
+
+ private final String namespace;
+ private final String password;
+
+ @Inject
+ public GeneratedKeyStoreProvider(@Named("Namespace") String namespace) {
+ this.namespace = namespace;
+ this.password = generatePassword();
+ generateKeyStore();
+ }
+
+ @Override
+ public Path getKeyStorePath() {
+ return KEYSTORE_PATH;
+ }
+
+ @Override
+ public String getKeyStorePassword() {
+ return password;
+ }
+
+ private String getCN() {
+ return String.format("%s.%s.svc", SERVICE_NAME, namespace);
+ }
+
+ private String generatePassword() {
+ return RandomStringUtils.random(24, 0, 0, true, true, null, new java.security.SecureRandom());
+ }
+
+ private Certificate generateCertificate(KeyPair keyPair)
+ throws OperatorCreationException, CertificateException, CertIOException {
+ BouncyCastleProvider bcProvider = new BouncyCastleProvider();
+ Security.addProvider(bcProvider);
+
+ Instant start = Instant.now();
+ X500Name dnName = new X500Name(String.format("cn=%s", getCN()));
+ DERSequence subjectAlternativeNames =
+ new DERSequence(new ASN1Encodable[] {new GeneralName(GeneralName.dNSName, getCN())});
+
+ X509v3CertificateBuilder certBuilder =
+ new JcaX509v3CertificateBuilder(
+ dnName,
+ BigInteger.valueOf(start.toEpochMilli()),
+ Date.from(start),
+ Date.from(start.plus(365, ChronoUnit.DAYS)),
+ dnName,
+ keyPair.getPublic())
+ .addExtension(Extension.subjectAlternativeName, false, subjectAlternativeNames);
+
+ ContentSigner contentSigner =
+ new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate());
+ return new JcaX509CertificateConverter()
+ .setProvider(bcProvider)
+ .getCertificate(certBuilder.build(contentSigner));
+ }
+
+ private void generateKeyStore() {
+ KEYSTORE_PATH.getParent().toFile().mkdirs();
+ try (FileOutputStream fos = new FileOutputStream(KEYSTORE_PATH.toFile())) {
+ KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
+ keyPairGenerator.initialize(4096);
+ KeyPair keyPair = keyPairGenerator.generateKeyPair();
+
+ Certificate[] chain = {generateCertificate(keyPair)};
+
+ KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
+ keyStore.load(null, null);
+ keyStore.setKeyEntry(getAlias(), keyPair.getPrivate(), password.toCharArray(), chain);
+ keyStore.store(fos, password.toCharArray());
+ } catch (IOException
+ | NoSuchAlgorithmException
+ | CertificateException
+ | KeyStoreException
+ | OperatorCreationException e) {
+ throw new IllegalStateException("Failed to create keystore.", e);
+ }
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/HealthcheckServlet.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/HealthcheckServlet.java
new file mode 100644
index 0000000..6097bde
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/HealthcheckServlet.java
@@ -0,0 +1,31 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import jakarta.servlet.http.HttpServlet;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+import java.io.IOException;
+
+public class HealthcheckServlet extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+
+ protected void doGet(HttpServletRequest request, HttpServletResponse response)
+ throws IOException {
+ response.setContentType("application/text");
+ response.setStatus(HttpServletResponse.SC_OK);
+ response.getWriter().println("ALL GOOD.");
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/HttpServer.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/HttpServer.java
new file mode 100644
index 0000000..23b8055
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/HttpServer.java
@@ -0,0 +1,72 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import java.util.Set;
+import org.eclipse.jetty.server.Connector;
+import org.eclipse.jetty.server.HttpConfiguration;
+import org.eclipse.jetty.server.HttpConnectionFactory;
+import org.eclipse.jetty.server.SecureRequestCustomizer;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.servlet.ServletHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.util.ssl.SslContextFactory;
+
+@Singleton
+public class HttpServer {
+ public static final String KEYSTORE_PATH = "/operator/keystore.jks";
+ public static final String KEYSTORE_PWD_FILE = "/operator/keystore.password";
+ public static final int PORT = 8080;
+
+ private final Server server = new Server();
+ private final KeyStoreProvider keyStoreProvider;
+ private final Set<AdmissionWebhookServlet> admissionWebhookServlets;
+
+ @Inject
+ public HttpServer(
+ KeyStoreProvider keyStoreProvider, Set<AdmissionWebhookServlet> admissionWebhookServlets) {
+ this.keyStoreProvider = keyStoreProvider;
+ this.admissionWebhookServlets = admissionWebhookServlets;
+ }
+
+ public void start() throws Exception {
+ SslContextFactory.Server ssl = new SslContextFactory.Server();
+ ssl.setKeyStorePath(keyStoreProvider.getKeyStorePath().toString());
+ ssl.setTrustStorePath(keyStoreProvider.getKeyStorePath().toString());
+ ssl.setKeyStorePassword(keyStoreProvider.getKeyStorePassword());
+ ssl.setTrustStorePassword(keyStoreProvider.getKeyStorePassword());
+ ssl.setSniRequired(false);
+
+ HttpConfiguration sslConfiguration = new HttpConfiguration();
+ sslConfiguration.addCustomizer(new SecureRequestCustomizer(false));
+ HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(sslConfiguration);
+
+ ServerConnector connector = new ServerConnector(server, ssl, httpConnectionFactory);
+ connector.setPort(PORT);
+ server.setConnectors(new Connector[] {connector});
+
+ ServletHandler servletHandler = new ServletHandler();
+ for (AdmissionWebhookServlet servlet : admissionWebhookServlets) {
+ servletHandler.addServletWithMapping(new ServletHolder(servlet), servlet.getURI());
+ }
+ servletHandler.addServletWithMapping(HealthcheckServlet.class, "/health");
+ server.setHandler(servletHandler);
+
+ server.start();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/KeyStoreProvider.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/KeyStoreProvider.java
new file mode 100644
index 0000000..c41777f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/KeyStoreProvider.java
@@ -0,0 +1,32 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.CertificateEncodingException;
+import java.security.cert.CertificateException;
+
+public interface KeyStoreProvider {
+ Path getKeyStorePath();
+
+ String getKeyStorePassword() throws IOException;
+
+ String getCertificate()
+ throws CertificateEncodingException, KeyStoreException, NoSuchAlgorithmException,
+ CertificateException, IOException;
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/ServerModule.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/ServerModule.java
new file mode 100644
index 0000000..ae613dd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/ServerModule.java
@@ -0,0 +1,40 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import static com.google.gerrit.k8s.operator.server.FileSystemKeyStoreProvider.KEYSTORE_PATH;
+
+import com.google.gerrit.k8s.operator.v1alpha.admission.servlet.GerritAdmissionWebhook;
+import com.google.gerrit.k8s.operator.v1alpha.admission.servlet.GerritClusterAdmissionWebhook;
+import com.google.gerrit.k8s.operator.v1alpha.admission.servlet.GitGcAdmissionWebhook;
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import java.io.File;
+
+public class ServerModule extends AbstractModule {
+ public void configure() {
+ if (new File(KEYSTORE_PATH).exists()) {
+ bind(KeyStoreProvider.class).to(FileSystemKeyStoreProvider.class);
+ } else {
+ bind(KeyStoreProvider.class).to(GeneratedKeyStoreProvider.class);
+ }
+ bind(HttpServer.class);
+ Multibinder<AdmissionWebhookServlet> admissionWebhookServlets =
+ Multibinder.newSetBinder(binder(), AdmissionWebhookServlet.class);
+ admissionWebhookServlets.addBinding().to(GerritClusterAdmissionWebhook.class);
+ admissionWebhookServlets.addBinding().to(GitGcAdmissionWebhook.class);
+ admissionWebhookServlets.addBinding().to(GerritAdmissionWebhook.class);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/ValidatingAdmissionWebhookServlet.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/ValidatingAdmissionWebhookServlet.java
new file mode 100644
index 0000000..f6d7c39
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/server/ValidatingAdmissionWebhookServlet.java
@@ -0,0 +1,61 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.flogger.FluentLogger;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.Status;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionResponseBuilder;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionReview;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+import java.io.IOException;
+
+public abstract class ValidatingAdmissionWebhookServlet extends AdmissionWebhookServlet {
+ private static final long serialVersionUID = 1L;
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ public abstract Status validate(HasMetadata resource);
+
+ @Override
+ public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException {
+ ObjectMapper objectMapper = new ObjectMapper();
+ AdmissionReview admissionReq =
+ objectMapper.readValue(request.getInputStream(), AdmissionReview.class);
+
+ logger.atFine().log("Admission request received: %s", admissionReq.toString());
+
+ response.setContentType("application/json");
+ AdmissionResponseBuilder admissionRespBuilder =
+ new AdmissionResponseBuilder().withUid(admissionReq.getRequest().getUid());
+ Status validationStatus = validate((HasMetadata) admissionReq.getRequest().getObject());
+ response.setStatus(HttpServletResponse.SC_OK);
+ if (validationStatus.getCode() < 400) {
+ admissionRespBuilder = admissionRespBuilder.withAllowed(true);
+ } else {
+ admissionRespBuilder = admissionRespBuilder.withAllowed(false).withStatus(validationStatus);
+ }
+ admissionReq.setResponse(admissionRespBuilder.build());
+ objectMapper.writeValue(response.getWriter(), admissionReq);
+ logger.atFine().log(
+ "Admission request responded with %s", admissionReq.getResponse().toString());
+ }
+
+ @Override
+ public String getURI() {
+ return String.format("/admission/%s/%s", getVersion(), getName());
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/util/CRUDKubernetesDependentPVCResource.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/util/CRUDKubernetesDependentPVCResource.java
new file mode 100644
index 0000000..cd5bd8e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/util/CRUDKubernetesDependentPVCResource.java
@@ -0,0 +1,54 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.util;
+
+import com.google.common.flogger.FluentLogger;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaimSpec;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+
+public abstract class CRUDKubernetesDependentPVCResource<P extends HasMetadata>
+ extends CRUDKubernetesDependentResource<PersistentVolumeClaim, P> {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ public CRUDKubernetesDependentPVCResource() {
+ super(PersistentVolumeClaim.class);
+ }
+
+ @Override
+ protected final PersistentVolumeClaim desired(P primary, Context<P> context) {
+ PersistentVolumeClaim pvc = desiredPVC(primary, context);
+ PersistentVolumeClaim existingPvc =
+ client
+ .persistentVolumeClaims()
+ .inNamespace(pvc.getMetadata().getNamespace())
+ .withName(pvc.getMetadata().getName())
+ .get();
+ String volumeName = pvc.getSpec().getVolumeName();
+ if (existingPvc != null && (volumeName == null || volumeName.isEmpty())) {
+ logger.atFine().log(
+ "PVC %s/%s already has bound a PV. Keeping volumeName reference.",
+ pvc.getMetadata().getNamespace(), pvc.getMetadata().getName());
+ PersistentVolumeClaimSpec pvcSpec = pvc.getSpec();
+ pvcSpec.setVolumeName(existingPvc.getSpec().getVolumeName());
+ pvc.setSpec(pvcSpec);
+ }
+ return pvc;
+ }
+
+ protected abstract PersistentVolumeClaim desiredPVC(P primary, Context<P> context);
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GerritAdmissionWebhook.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GerritAdmissionWebhook.java
new file mode 100644
index 0000000..f74cb59
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GerritAdmissionWebhook.java
@@ -0,0 +1,116 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.admission.servlet;
+
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig.RefDatabase.SPANNER;
+import static com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig.RefDatabase.ZOOKEEPER;
+
+import com.google.gerrit.k8s.operator.gerrit.config.InvalidGerritConfigException;
+import com.google.gerrit.k8s.operator.server.ValidatingAdmissionWebhookServlet;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig;
+import com.google.gerrit.k8s.operator.v1alpha.gerrit.config.GerritConfigBuilder;
+import com.google.inject.Singleton;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.Status;
+import io.fabric8.kubernetes.api.model.StatusBuilder;
+import jakarta.servlet.http.HttpServletResponse;
+import java.util.Locale;
+
+@Singleton
+public class GerritAdmissionWebhook extends ValidatingAdmissionWebhookServlet {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Status validate(HasMetadata resource) {
+ if (!(resource instanceof Gerrit)) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_BAD_REQUEST)
+ .withMessage("Invalid resource. Expected Gerrit-resource for validation.")
+ .build();
+ }
+
+ Gerrit gerrit = (Gerrit) resource;
+
+ try {
+ invalidGerritConfiguration(gerrit);
+ } catch (InvalidGerritConfigException e) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_BAD_REQUEST)
+ .withMessage(e.getMessage())
+ .build();
+ }
+
+ if (noRefDbConfiguredForHA(gerrit)) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_BAD_REQUEST)
+ .withMessage(
+ "A Ref-Database is required to horizontally scale a primary Gerrit: .spec.refdb.database != NONE")
+ .build();
+ }
+
+ if (missingRefdbConfig(gerrit)) {
+ String refDbName = "";
+ switch (gerrit.getSpec().getRefdb().getDatabase()) {
+ case ZOOKEEPER:
+ refDbName = ZOOKEEPER.toString().toLowerCase(Locale.US);
+ break;
+ case SPANNER:
+ refDbName = SPANNER.toString().toLowerCase(Locale.US);
+ break;
+ default:
+ break;
+ }
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_BAD_REQUEST)
+ .withMessage(
+ String.format("Missing %s configuration (.spec.refdb.%s)", refDbName, refDbName))
+ .build();
+ }
+
+ return new StatusBuilder().withCode(HttpServletResponse.SC_OK).build();
+ }
+
+ private void invalidGerritConfiguration(Gerrit gerrit) throws InvalidGerritConfigException {
+ new GerritConfigBuilder(gerrit).validate();
+ }
+
+ private boolean noRefDbConfiguredForHA(Gerrit gerrit) {
+ return gerrit.getSpec().isHighlyAvailablePrimary()
+ && gerrit.getSpec().getRefdb().getDatabase().equals(GlobalRefDbConfig.RefDatabase.NONE);
+ }
+
+ private boolean missingRefdbConfig(Gerrit gerrit) {
+ GlobalRefDbConfig refDbConfig = gerrit.getSpec().getRefdb();
+ switch (refDbConfig.getDatabase()) {
+ case ZOOKEEPER:
+ return refDbConfig.getZookeeper() == null;
+ case SPANNER:
+ return refDbConfig.getSpanner() == null;
+ default:
+ return false;
+ }
+ }
+
+ @Override
+ public String getName() {
+ return "gerrit";
+ }
+
+ @Override
+ public String getVersion() {
+ return "v1alpha";
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GerritClusterAdmissionWebhook.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GerritClusterAdmissionWebhook.java
new file mode 100644
index 0000000..8817559
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GerritClusterAdmissionWebhook.java
@@ -0,0 +1,103 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.admission.servlet;
+
+import com.google.gerrit.k8s.operator.server.ValidatingAdmissionWebhookServlet;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.inject.Singleton;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.Status;
+import io.fabric8.kubernetes.api.model.StatusBuilder;
+import jakarta.servlet.http.HttpServletResponse;
+
+@Singleton
+public class GerritClusterAdmissionWebhook extends ValidatingAdmissionWebhookServlet {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Status validate(HasMetadata resource) {
+ if (!(resource instanceof GerritCluster)) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_BAD_REQUEST)
+ .withMessage("Invalid resource. Expected GerritCluster-resource for validation.")
+ .build();
+ }
+
+ GerritCluster gerritCluster = (GerritCluster) resource;
+
+ if (multiplePrimaryGerritInCluster(gerritCluster)) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_CONFLICT)
+ .withMessage("Only a single primary Gerrit is allowed per Gerrit Cluster.")
+ .build();
+ }
+
+ if (primaryGerritAndReceiverInCluster(gerritCluster)) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_CONFLICT)
+ .withMessage("A primary Gerrit cannot be in the same Gerrit Cluster as a Receiver.")
+ .build();
+ }
+
+ if (multipleGerritReplicaInCluster(gerritCluster)) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_CONFLICT)
+ .withMessage("Only a single Gerrit Replica is allowed per Gerrit Cluster.")
+ .build();
+ }
+
+ GerritAdmissionWebhook gerritAdmission = new GerritAdmissionWebhook();
+ for (GerritTemplate gerrit : gerritCluster.getSpec().getGerrits()) {
+ Status status = gerritAdmission.validate(gerrit.toGerrit(gerritCluster));
+ if (status.getCode() != HttpServletResponse.SC_OK) {
+ return status;
+ }
+ }
+
+ return new StatusBuilder().withCode(HttpServletResponse.SC_OK).build();
+ }
+
+ private boolean multiplePrimaryGerritInCluster(GerritCluster gerritCluster) {
+ return gerritCluster.getSpec().getGerrits().stream()
+ .filter(g -> g.getSpec().getMode() == GerritMode.PRIMARY)
+ .count()
+ > 1;
+ }
+
+ private boolean primaryGerritAndReceiverInCluster(GerritCluster gerritCluster) {
+ return gerritCluster.getSpec().getGerrits().stream()
+ .anyMatch(g -> g.getSpec().getMode() == GerritMode.PRIMARY)
+ && gerritCluster.getSpec().getReceiver() != null;
+ }
+
+ private boolean multipleGerritReplicaInCluster(GerritCluster gerritCluster) {
+ return gerritCluster.getSpec().getGerrits().stream()
+ .filter(g -> g.getSpec().getMode() == GerritMode.REPLICA)
+ .count()
+ > 1;
+ }
+
+ @Override
+ public String getName() {
+ return "gerritcluster";
+ }
+
+ @Override
+ public String getVersion() {
+ return "v1alpha";
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GitGcAdmissionWebhook.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GitGcAdmissionWebhook.java
new file mode 100644
index 0000000..3198c1a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/admission/servlet/GitGcAdmissionWebhook.java
@@ -0,0 +1,114 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.admission.servlet;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.server.ValidatingAdmissionWebhookServlet;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollection;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.Status;
+import io.fabric8.kubernetes.api.model.StatusBuilder;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import jakarta.servlet.http.HttpServletResponse;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+@Singleton
+public class GitGcAdmissionWebhook extends ValidatingAdmissionWebhookServlet {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ private static final long serialVersionUID = 1L;
+ private static final Status OK_STATUS =
+ new StatusBuilder().withCode(HttpServletResponse.SC_OK).build();
+
+ private final KubernetesClient client;
+
+ @Inject
+ public GitGcAdmissionWebhook(KubernetesClient client) {
+ this.client = client;
+ }
+
+ @Override
+ public Status validate(HasMetadata resource) {
+ if (!(resource instanceof GitGarbageCollection)) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_BAD_REQUEST)
+ .withMessage("Invalid resource. Expected GitGarbageCollection-resource for validation.")
+ .build();
+ }
+
+ GitGarbageCollection gitGc = (GitGarbageCollection) resource;
+
+ String gitGcUid = gitGc.getMetadata().getUid();
+ List<GitGarbageCollection> gitGcs =
+ client
+ .resources(GitGarbageCollection.class)
+ .inNamespace(gitGc.getMetadata().getNamespace())
+ .list()
+ .getItems()
+ .stream()
+ .filter(gc -> !gc.getMetadata().getUid().equals(gitGcUid))
+ .collect(Collectors.toList());
+ Set<String> projects = gitGc.getSpec().getProjects();
+
+ logger.atFine().log("Detected GitGcs: %s", gitGcs);
+ if (projects.isEmpty()) {
+ if (gitGcs.stream().anyMatch(gc -> gc.getSpec().getProjects().isEmpty())) {
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_CONFLICT)
+ .withMessage("Only a single GitGc working on all projects allowed per GerritCluster.")
+ .build();
+ }
+ return OK_STATUS;
+ }
+
+ Set<String> projectsWithExistingGC =
+ gitGcs.stream()
+ .map(gc -> gc.getSpec().getProjects())
+ .flatMap(Collection::stream)
+ .collect(Collectors.toSet());
+ Set<String> projectsIntersection = getIntersection(projects, projectsWithExistingGC);
+ if (projectsIntersection.isEmpty()) {
+ return OK_STATUS;
+ }
+ return new StatusBuilder()
+ .withCode(HttpServletResponse.SC_CONFLICT)
+ .withMessage(
+ "Only a single GitGc is allowed to work on a given project. Conflict for projects: "
+ + projectsIntersection)
+ .build();
+ }
+
+ private Set<String> getIntersection(Set<String> set1, Set<String> set2) {
+ Set<String> intersection = new HashSet<>();
+ intersection.addAll(set1);
+ intersection.retainAll(set2);
+ return intersection;
+ }
+
+ @Override
+ public String getName() {
+ return "gitgc";
+ }
+
+ @Override
+ public String getVersion() {
+ return "v1alpha";
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritCluster.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritCluster.java
new file mode 100644
index 0000000..b88ab09
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritCluster.java
@@ -0,0 +1,242 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.cluster;
+
+import static com.google.gerrit.k8s.operator.cluster.dependent.NfsIdmapdConfigMap.NFS_IDMAPD_CM_NAME;
+import static com.google.gerrit.k8s.operator.cluster.dependent.SharedPVC.SHARED_PVC_NAME;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.ContainerImageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.SharedStorage.ExternalPVCConfig;
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.ContainerBuilder;
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.EnvVarBuilder;
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.api.model.Volume;
+import io.fabric8.kubernetes.api.model.VolumeBuilder;
+import io.fabric8.kubernetes.api.model.VolumeMount;
+import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringStyle;
+
+@Group("gerritoperator.google.com")
+@Version("v1alpha17")
+@ShortNames("gclus")
+public class GerritCluster extends CustomResource<GerritClusterSpec, GerritClusterStatus>
+ implements Namespaced {
+ private static final long serialVersionUID = 2L;
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ private static final String SHARED_VOLUME_NAME = "shared";
+ private static final String NFS_IDMAPD_CONFIG_VOLUME_NAME = "nfs-config";
+ private static final int GERRIT_FS_UID = 1000;
+ private static final int GERRIT_FS_GID = 100;
+ public static final String PLUGIN_CACHE_MOUNT_PATH = "/var/mnt/plugin_cache";
+ public static final String PLUGIN_CACHE_SUB_DIR = "plugin_cache";
+
+ public String toString() {
+ return ToStringBuilder.reflectionToString(this, ToStringStyle.JSON_STYLE);
+ }
+
+ @JsonIgnore
+ public Map<String, String> getLabels(String component, String createdBy) {
+ return getLabels(getMetadata().getName(), component, createdBy);
+ }
+
+ // TODO(Thomas): Having so many string parameters is bad. The only parameter should be the
+ // Kubernetes resource that implements an interface that provides methods to retrieve the
+ // required information.
+ @JsonIgnore
+ public static Map<String, String> getLabels(String instance, String component, String createdBy) {
+ Map<String, String> labels = new HashMap<>();
+
+ labels.putAll(getSelectorLabels(instance, component));
+ String version = GerritCluster.class.getPackage().getImplementationVersion();
+ if (version == null || version.isBlank()) {
+ logger.atWarning().log("Unable to read Gerrit Operator version from jar.");
+ version = "unknown";
+ }
+ labels.put("app.kubernetes.io/version", version);
+ labels.put("app.kubernetes.io/created-by", createdBy);
+
+ return labels;
+ }
+
+ @JsonIgnore
+ public static Map<String, String> getSelectorLabels(String instance, String component) {
+ Map<String, String> labels = new HashMap<>();
+
+ labels.put("app.kubernetes.io/name", "gerrit");
+ labels.put("app.kubernetes.io/instance", instance);
+ labels.put("app.kubernetes.io/component", component);
+ labels.put("app.kubernetes.io/part-of", instance);
+ labels.put("app.kubernetes.io/managed-by", "gerrit-operator");
+
+ return labels;
+ }
+
+ @JsonIgnore
+ public static Volume getSharedVolume(ExternalPVCConfig externalPVC) {
+ String claimName = externalPVC.isEnabled() ? externalPVC.getClaimName() : SHARED_PVC_NAME;
+ return new VolumeBuilder()
+ .withName(SHARED_VOLUME_NAME)
+ .withNewPersistentVolumeClaim()
+ .withClaimName(claimName)
+ .endPersistentVolumeClaim()
+ .build();
+ }
+
+ @JsonIgnore
+ public static VolumeMount getGitRepositoriesVolumeMount() {
+ return getGitRepositoriesVolumeMount("/var/mnt/git");
+ }
+
+ @JsonIgnore
+ public static VolumeMount getGitRepositoriesVolumeMount(String mountPath) {
+ return new VolumeMountBuilder()
+ .withName(SHARED_VOLUME_NAME)
+ .withSubPath("git")
+ .withMountPath(mountPath)
+ .build();
+ }
+
+ @JsonIgnore
+ public static VolumeMount getHAShareVolumeMount() {
+ return getSharedVolumeMount("shared", "/var/mnt/shared");
+ }
+
+ @JsonIgnore
+ public static VolumeMount getPluginCacheVolumeMount() {
+ return getSharedVolumeMount(PLUGIN_CACHE_SUB_DIR, "/var/mnt/plugin_cache");
+ }
+
+ @JsonIgnore
+ public static VolumeMount getSharedVolumeMount(String subPath, String mountPath) {
+ return new VolumeMountBuilder()
+ .withName(SHARED_VOLUME_NAME)
+ .withSubPath(subPath)
+ .withMountPath(mountPath)
+ .build();
+ }
+
+ @JsonIgnore
+ public static VolumeMount getLogsVolumeMount() {
+ return getLogsVolumeMount("/var/mnt/logs");
+ }
+
+ @JsonIgnore
+ public static VolumeMount getLogsVolumeMount(String mountPath) {
+ return new VolumeMountBuilder()
+ .withName(SHARED_VOLUME_NAME)
+ .withSubPathExpr("logs/$(POD_NAME)")
+ .withMountPath(mountPath)
+ .build();
+ }
+
+ @JsonIgnore
+ public static Volume getNfsImapdConfigVolume() {
+ return new VolumeBuilder()
+ .withName(NFS_IDMAPD_CONFIG_VOLUME_NAME)
+ .withNewConfigMap()
+ .withName(NFS_IDMAPD_CM_NAME)
+ .endConfigMap()
+ .build();
+ }
+
+ @JsonIgnore
+ public static VolumeMount getNfsImapdConfigVolumeMount() {
+ return new VolumeMountBuilder()
+ .withName(NFS_IDMAPD_CONFIG_VOLUME_NAME)
+ .withMountPath("/etc/idmapd.conf")
+ .withSubPath("idmapd.conf")
+ .build();
+ }
+
+ @JsonIgnore
+ public Container createNfsInitContainer() {
+ return createNfsInitContainer(
+ getSpec().getStorage().getStorageClasses().getNfsWorkaround().getIdmapdConfig() != null,
+ getSpec().getContainerImages());
+ }
+
+ @JsonIgnore
+ public static Container createNfsInitContainer(
+ boolean configureIdmapd, ContainerImageConfig imageConfig) {
+ return createNfsInitContainer(configureIdmapd, imageConfig, List.of());
+ }
+
+ @JsonIgnore
+ public static Container createNfsInitContainer(
+ boolean configureIdmapd,
+ ContainerImageConfig imageConfig,
+ List<VolumeMount> additionalVolumeMounts) {
+ List<VolumeMount> volumeMounts = new ArrayList<>();
+ volumeMounts.add(getLogsVolumeMount());
+ volumeMounts.add(getGitRepositoriesVolumeMount());
+
+ volumeMounts.addAll(additionalVolumeMounts);
+
+ if (configureIdmapd) {
+ volumeMounts.add(getNfsImapdConfigVolumeMount());
+ }
+
+ StringBuilder args = new StringBuilder();
+ args.append("chown -R ");
+ args.append(GERRIT_FS_UID);
+ args.append(":");
+ args.append(GERRIT_FS_GID);
+ args.append(" ");
+ for (VolumeMount vm : volumeMounts) {
+ args.append(vm.getMountPath());
+ args.append(" ");
+ }
+
+ return new ContainerBuilder()
+ .withName("nfs-init")
+ .withImagePullPolicy(imageConfig.getImagePullPolicy())
+ .withImage(imageConfig.getBusyBox().getBusyBoxImage())
+ .withCommand(List.of("sh", "-c"))
+ .withArgs(args.toString().trim())
+ .withEnv(getPodNameEnvVar())
+ .withVolumeMounts(volumeMounts)
+ .build();
+ }
+
+ @JsonIgnore
+ public static EnvVar getPodNameEnvVar() {
+ return new EnvVarBuilder()
+ .withName("POD_NAME")
+ .withNewValueFrom()
+ .withNewFieldRef()
+ .withFieldPath("metadata.name")
+ .endFieldRef()
+ .endValueFrom()
+ .build();
+ }
+
+ @JsonIgnore
+ public String getDependentResourceName(String nameSuffix) {
+ return String.format("%s-%s", getMetadata().getName(), nameSuffix);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritClusterSpec.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritClusterSpec.java
new file mode 100644
index 0000000..956c678
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritClusterSpec.java
@@ -0,0 +1,91 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.cluster;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.ContainerImageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritClusterIngressConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritStorageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GerritClusterSpec {
+
+ private GerritStorageConfig storage = new GerritStorageConfig();
+ private ContainerImageConfig containerImages = new ContainerImageConfig();
+ private GerritClusterIngressConfig ingress = new GerritClusterIngressConfig();
+ private GlobalRefDbConfig refdb = new GlobalRefDbConfig();
+ private String serverId = "";
+ private List<GerritTemplate> gerrits = new ArrayList<>();
+ private ReceiverTemplate receiver;
+
+ public GerritStorageConfig getStorage() {
+ return storage;
+ }
+
+ public void setStorage(GerritStorageConfig storage) {
+ this.storage = storage;
+ }
+
+ public ContainerImageConfig getContainerImages() {
+ return containerImages;
+ }
+
+ public void setContainerImages(ContainerImageConfig containerImages) {
+ this.containerImages = containerImages;
+ }
+
+ public GerritClusterIngressConfig getIngress() {
+ return ingress;
+ }
+
+ public void setIngress(GerritClusterIngressConfig ingress) {
+ this.ingress = ingress;
+ }
+
+ public GlobalRefDbConfig getRefdb() {
+ return refdb;
+ }
+
+ public void setRefdb(GlobalRefDbConfig refdb) {
+ this.refdb = refdb;
+ }
+
+ public String getServerId() {
+ return serverId;
+ }
+
+ public void setServerId(String serverId) {
+ this.serverId = serverId;
+ }
+
+ public List<GerritTemplate> getGerrits() {
+ return gerrits;
+ }
+
+ public void setGerrits(List<GerritTemplate> gerrits) {
+ this.gerrits = gerrits;
+ }
+
+ public ReceiverTemplate getReceiver() {
+ return receiver;
+ }
+
+ public void setReceiver(ReceiverTemplate receiver) {
+ this.receiver = receiver;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritClusterStatus.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritClusterStatus.java
new file mode 100644
index 0000000..a0b853d
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/cluster/GerritClusterStatus.java
@@ -0,0 +1,30 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.cluster;
+
+import java.util.List;
+import java.util.Map;
+
+public class GerritClusterStatus {
+ private Map<String, List<String>> members;
+
+ public Map<String, List<String>> getMembers() {
+ return members;
+ }
+
+ public void setMembers(Map<String, List<String>> members) {
+ this.members = members;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/Gerrit.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/Gerrit.java
new file mode 100644
index 0000000..4027cdd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/Gerrit.java
@@ -0,0 +1,40 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringStyle;
+
+@Group("gerritoperator.google.com")
+@Version("v1alpha17")
+@ShortNames("gcr")
+public class Gerrit extends CustomResource<GerritSpec, GerritStatus> implements Namespaced {
+ private static final long serialVersionUID = 2L;
+
+ public String toString() {
+ return ToStringBuilder.reflectionToString(this, ToStringStyle.JSON_STYLE);
+ }
+
+ @JsonIgnore
+ public boolean isSshEnabled() {
+ return getSpec().getService().getSshPort() > 0;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritDebugConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritDebugConfig.java
new file mode 100644
index 0000000..7e7a5fd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritDebugConfig.java
@@ -0,0 +1,36 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+public class GerritDebugConfig {
+ private boolean enabled;
+ private boolean suspend;
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public boolean isSuspend() {
+ return suspend;
+ }
+
+ public void setSuspend(boolean suspend) {
+ this.suspend = suspend;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritInitConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritInitConfig.java
new file mode 100644
index 0000000..82dc41f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritInitConfig.java
@@ -0,0 +1,89 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.List;
+
+public class GerritInitConfig {
+ private String caCertPath = "/var/config/ca.crt";
+ private boolean pluginCacheEnabled;
+ private String pluginCacheDir = "/var/mnt/plugins";
+ private List<GerritPlugin> plugins;
+ private List<GerritModule> libs;
+
+ @JsonProperty("highAvailability")
+ private boolean isHighlyAvailable;
+
+ private String refdb;
+
+ public String getCaCertPath() {
+ return caCertPath;
+ }
+
+ public void setCaCertPath(String caCertPath) {
+ this.caCertPath = caCertPath;
+ }
+
+ public boolean isPluginCacheEnabled() {
+ return pluginCacheEnabled;
+ }
+
+ public void setPluginCacheEnabled(boolean pluginCacheEnabled) {
+ this.pluginCacheEnabled = pluginCacheEnabled;
+ }
+
+ public String getPluginCacheDir() {
+ return pluginCacheDir;
+ }
+
+ public void setPluginCacheDir(String pluginCacheDir) {
+ this.pluginCacheDir = pluginCacheDir;
+ }
+
+ public List<GerritPlugin> getPlugins() {
+ return plugins;
+ }
+
+ public void setPlugins(List<GerritPlugin> plugins) {
+ this.plugins = plugins;
+ }
+
+ public List<GerritModule> getLibs() {
+ return libs;
+ }
+
+ public void setLibs(List<GerritModule> libs) {
+ this.libs = libs;
+ }
+
+ @JsonProperty("highAvailability")
+ public boolean isHighlyAvailable() {
+ return isHighlyAvailable;
+ }
+
+ @JsonProperty("highAvailability")
+ public void setHighlyAvailable(boolean isHighlyAvailable) {
+ this.isHighlyAvailable = isHighlyAvailable;
+ }
+
+ public String getRefdb() {
+ return refdb;
+ }
+
+ public void setRefdb(String refdb) {
+ this.refdb = refdb;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritModule.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritModule.java
new file mode 100644
index 0000000..5b0f241
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritModule.java
@@ -0,0 +1,66 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import java.io.Serializable;
+
+public class GerritModule implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+
+ @JsonInclude(JsonInclude.Include.NON_EMPTY)
+ private String url;
+
+ @JsonInclude(JsonInclude.Include.NON_EMPTY)
+ private String sha1;
+
+ public GerritModule() {}
+
+ public GerritModule(String name) {
+ this.name = name;
+ }
+
+ public GerritModule(String name, String url, String sha1) {
+ this.name = name;
+ this.url = url;
+ this.sha1 = sha1;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getUrl() {
+ return url;
+ }
+
+ public void setUrl(String url) {
+ this.url = url;
+ }
+
+ public String getSha1() {
+ return sha1;
+ }
+
+ public void setSha1(String sha1) {
+ this.sha1 = sha1;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritPlugin.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritPlugin.java
new file mode 100644
index 0000000..196ecb4
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritPlugin.java
@@ -0,0 +1,48 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+public class GerritPlugin extends GerritModule {
+ private static final long serialVersionUID = 1L;
+
+ @JsonInclude(JsonInclude.Include.NON_EMPTY)
+ private boolean installAsLibrary = false;
+
+ public GerritPlugin() {}
+
+ public GerritPlugin(String name) {
+ super(name);
+ }
+
+ public GerritPlugin(String name, String url, String sha1) {
+ super(name, url, sha1);
+ }
+
+ public boolean isInstallAsLibrary() {
+ return installAsLibrary;
+ }
+
+ public void setInstallAsLibrary(boolean installAsLibrary) {
+ this.installAsLibrary = installAsLibrary;
+ }
+
+ @JsonIgnore
+ public boolean isPackagedPlugin() {
+ return getUrl() == null;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritProbe.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritProbe.java
new file mode 100644
index 0000000..46733ed
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritProbe.java
@@ -0,0 +1,81 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritStatefulSet;
+import io.fabric8.kubernetes.api.model.ExecAction;
+import io.fabric8.kubernetes.api.model.GRPCAction;
+import io.fabric8.kubernetes.api.model.HTTPGetAction;
+import io.fabric8.kubernetes.api.model.HTTPGetActionBuilder;
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Probe;
+import io.fabric8.kubernetes.api.model.TCPSocketAction;
+
+/**
+ * A {@link Probe} that is pinned to Gerrit's healthcheck endpoint.
+ *
+ * <p>All action-type accessors are overridden so that, regardless of what a user configures,
+ * the probe always performs an HTTP GET against {@code /config/server/healthcheck~status} on
+ * the Gerrit HTTP port, and the exec/grpc/tcpSocket action types are disabled. Timing fields
+ * (delays, thresholds, ...) inherited from {@link Probe} remain user-configurable.
+ */
+public class GerritProbe extends Probe {
+  private static final long serialVersionUID = 1L;
+
+  // The one and only probe action ever used: Gerrit's healthcheck plugin status endpoint.
+  private static final HTTPGetAction HTTP_GET_ACTION =
+      new HTTPGetActionBuilder()
+          .withPath("/config/server/healthcheck~status")
+          .withPort(new IntOrString(GerritStatefulSet.HTTP_PORT))
+          .build();
+
+  // NOTE(review): these re-declared fields appear to exist only to hide the inherited
+  // action fields from serialization/CRD schema generation via @JsonIgnore — confirm
+  // against the CRD generator's field-discovery behavior.
+  @JsonIgnore private ExecAction exec;
+
+  @JsonIgnore private GRPCAction grpc;
+
+  @JsonIgnore private TCPSocketAction tcpSocket;
+
+  // The setters below deliberately discard their arguments: user input for the probe
+  // action is ignored and the fixed behavior above is enforced instead.
+  @Override
+  public void setExec(ExecAction exec) {
+    super.setExec(null);
+  }
+
+  @Override
+  public void setGrpc(GRPCAction grpc) {
+    super.setGrpc(null);
+  }
+
+  @Override
+  public void setHttpGet(HTTPGetAction httpGet) {
+    super.setHttpGet(HTTP_GET_ACTION);
+  }
+
+  @Override
+  public void setTcpSocket(TCPSocketAction tcpSocket) {
+    super.setTcpSocket(null);
+  }
+
+  @Override
+  public ExecAction getExec() {
+    return null;
+  }
+
+  @Override
+  public GRPCAction getGrpc() {
+    return null;
+  }
+
+  @Override
+  public HTTPGetAction getHttpGet() {
+    return HTTP_GET_ACTION;
+  }
+
+  @Override
+  public TCPSocketAction getTcpSocket() {
+    return null;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritSite.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritSite.java
new file mode 100644
index 0000000..2b604c8
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritSite.java
@@ -0,0 +1,31 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import io.fabric8.kubernetes.api.model.Quantity;
+import java.io.Serializable;
+
+/**
+ * Spec element for the Gerrit site.
+ *
+ * <p>Currently carries only a size — presumably the requested capacity of the volume
+ * backing the Gerrit site directory; confirm against the consuming StatefulSet code.
+ */
+public class GerritSite implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  // Requested size; null when unset.
+  Quantity size;
+
+  /** Returns the requested site size, or null if not configured. */
+  public Quantity getSize() {
+    return this.size;
+  }
+
+  /** Sets the requested site size. */
+  public void setSize(Quantity newSize) {
+    this.size = newSize;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritSpec.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritSpec.java
new file mode 100644
index 0000000..ed21087
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritSpec.java
@@ -0,0 +1,74 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.ContainerImageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritStorageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.IngressConfig;
+
+/**
+ * Spec of a standalone {@code Gerrit} custom resource.
+ *
+ * <p>Extends {@link GerritTemplateSpec} (the per-instance template settings) with
+ * cluster-scoped configuration: container images, storage, ingress, global ref-db and
+ * the Gerrit server id.
+ */
+public class GerritSpec extends GerritTemplateSpec {
+  // All fields default to empty config objects / empty string so a minimal CR is valid.
+  private ContainerImageConfig containerImages = new ContainerImageConfig();
+  private GerritStorageConfig storage = new GerritStorageConfig();
+  private IngressConfig ingress = new IngressConfig();
+  private GlobalRefDbConfig refdb = new GlobalRefDbConfig();
+  private String serverId = "";
+
+  /** No-arg constructor required for Jackson deserialization. */
+  public GerritSpec() {}
+
+  /**
+   * Copies only the template part from {@code templateSpec}; the cluster-scoped fields
+   * of this class keep their defaults and are expected to be filled in by the caller
+   * (see {@code GerritTemplate.toGerrit}).
+   */
+  public GerritSpec(GerritTemplateSpec templateSpec) {
+    super(templateSpec);
+  }
+
+  public ContainerImageConfig getContainerImages() {
+    return containerImages;
+  }
+
+  public void setContainerImages(ContainerImageConfig containerImages) {
+    this.containerImages = containerImages;
+  }
+
+  public GerritStorageConfig getStorage() {
+    return storage;
+  }
+
+  public void setStorage(GerritStorageConfig storage) {
+    this.storage = storage;
+  }
+
+  public IngressConfig getIngress() {
+    return ingress;
+  }
+
+  public void setIngress(IngressConfig ingress) {
+    this.ingress = ingress;
+  }
+
+  public GlobalRefDbConfig getRefdb() {
+    return refdb;
+  }
+
+  public void setRefdb(GlobalRefDbConfig refdb) {
+    this.refdb = refdb;
+  }
+
+  public String getServerId() {
+    return serverId;
+  }
+
+  public void setServerId(String serverId) {
+    this.serverId = serverId;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritStatus.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritStatus.java
new file mode 100644
index 0000000..f779f75
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritStatus.java
@@ -0,0 +1,48 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class GerritStatus {
+ private boolean ready = false;
+ private Map<String, String> appliedConfigMapVersions = new HashMap<>();
+ private Map<String, String> appliedSecretVersions = new HashMap<>();
+
+ public boolean isReady() {
+ return ready;
+ }
+
+ public void setReady(boolean ready) {
+ this.ready = ready;
+ }
+
+ public Map<String, String> getAppliedConfigMapVersions() {
+ return appliedConfigMapVersions;
+ }
+
+ public void setAppliedConfigMapVersions(Map<String, String> appliedConfigMapVersions) {
+ this.appliedConfigMapVersions = appliedConfigMapVersions;
+ }
+
+ public Map<String, String> getAppliedSecretVersions() {
+ return appliedSecretVersions;
+ }
+
+ public void setAppliedSecretVersions(Map<String, String> appliedSecretVersions) {
+ this.appliedSecretVersions = appliedSecretVersions;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritTemplate.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritTemplate.java
new file mode 100644
index 0000000..1144737
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritTemplate.java
@@ -0,0 +1,108 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GlobalRefDbConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.IngressConfig;
+import io.fabric8.kubernetes.api.model.KubernetesResource;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+
+@JsonDeserialize(using = com.fasterxml.jackson.databind.JsonDeserializer.None.class)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonPropertyOrder({"metadata", "spec"})
+/**
+ * Template for a Gerrit instance embedded in a {@code GerritCluster} CR.
+ *
+ * <p>Holds per-instance metadata and a {@link GerritTemplateSpec}; {@link #toGerrit} expands
+ * the template into a full standalone {@code Gerrit} resource by merging in cluster-level
+ * configuration (images, storage, ingress, ref-db, server id).
+ */
+public class GerritTemplate implements KubernetesResource {
+  private static final long serialVersionUID = 1L;
+
+  @JsonProperty("metadata")
+  private ObjectMeta metadata;
+
+  @JsonProperty("spec")
+  private GerritTemplateSpec spec;
+
+  /** No-arg constructor required for Jackson deserialization. */
+  public GerritTemplate() {}
+
+  @JsonProperty("metadata")
+  public ObjectMeta getMetadata() {
+    return metadata;
+  }
+
+  @JsonProperty("metadata")
+  public void setMetadata(ObjectMeta metadata) {
+    this.metadata = metadata;
+  }
+
+  @JsonProperty("spec")
+  public GerritTemplateSpec getSpec() {
+    return spec;
+  }
+
+  @JsonProperty("spec")
+  public void setSpec(GerritTemplateSpec spec) {
+    this.spec = spec;
+  }
+
+  /**
+   * Expands this template into a standalone {@code Gerrit} resource, copying container
+   * images, storage, ingress settings and server id from the owning {@code gerritCluster}.
+   *
+   * <p>For a highly-available primary (mode PRIMARY, replicas > 1) the cluster's global
+   * ref-db config is attached; a missing zookeeper root node defaults to
+   * {@code <namespace>/<cluster-name>}.
+   */
+  @JsonIgnore
+  public Gerrit toGerrit(GerritCluster gerritCluster) {
+    Gerrit gerrit = new Gerrit();
+    gerrit.setMetadata(getGerritMetadata(gerritCluster));
+    GerritSpec gerritSpec = new GerritSpec(spec);
+    gerritSpec.setContainerImages(gerritCluster.getSpec().getContainerImages());
+    gerritSpec.setStorage(gerritCluster.getSpec().getStorage());
+    // Only a subset of the cluster ingress config is projected onto the instance.
+    IngressConfig ingressConfig = new IngressConfig();
+    ingressConfig.setEnabled(gerritCluster.getSpec().getIngress().isEnabled());
+    ingressConfig.setHost(gerritCluster.getSpec().getIngress().getHost());
+    ingressConfig.setTlsEnabled(gerritCluster.getSpec().getIngress().getTls().isEnabled());
+    ingressConfig.setSsh(gerritCluster.getSpec().getIngress().getSsh());
+    gerritSpec.setIngress(ingressConfig);
+    gerritSpec.setServerId(getServerId(gerritCluster));
+    if (getSpec().isHighlyAvailablePrimary()) {
+      GlobalRefDbConfig refdb = gerritCluster.getSpec().getRefdb();
+      // NOTE(review): this mutates the refdb config object owned by the cluster spec
+      // (setRootNode) rather than a copy — confirm this side effect is intended.
+      if (refdb.getZookeeper() != null && refdb.getZookeeper().getRootNode() == null) {
+        refdb
+            .getZookeeper()
+            .setRootNode(
+                gerritCluster.getMetadata().getNamespace()
+                    + "/"
+                    + gerritCluster.getMetadata().getName());
+      }
+      gerritSpec.setRefdb(gerritCluster.getSpec().getRefdb());
+    }
+    gerrit.setSpec(gerritSpec);
+    return gerrit;
+  }
+
+  // Builds the instance metadata: template name/labels, placed in the cluster's namespace.
+  @JsonIgnore
+  private ObjectMeta getGerritMetadata(GerritCluster gerritCluster) {
+    return new ObjectMetaBuilder()
+        .withName(metadata.getName())
+        .withLabels(metadata.getLabels())
+        .withNamespace(gerritCluster.getMetadata().getNamespace())
+        .build();
+  }
+
+  // Returns the cluster's server id, defaulting to "<namespace>/<cluster-name>" when blank.
+  private String getServerId(GerritCluster gerritCluster) {
+    String serverId = gerritCluster.getSpec().getServerId();
+    return serverId.isBlank()
+        ? gerritCluster.getMetadata().getNamespace() + "/" + gerritCluster.getMetadata().getName()
+        : serverId;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritTemplateSpec.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritTemplateSpec.java
new file mode 100644
index 0000000..4349cd1
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gerrit/GerritTemplateSpec.java
@@ -0,0 +1,259 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.HttpSshServiceConfig;
+import io.fabric8.kubernetes.api.model.Affinity;
+import io.fabric8.kubernetes.api.model.ResourceRequirements;
+import io.fabric8.kubernetes.api.model.Toleration;
+import io.fabric8.kubernetes.api.model.TopologySpreadConstraint;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Per-instance template settings for a Gerrit deployment: pod scheduling, replica count,
+ * resources, probes, service, site, plugins/libs, config files and run mode.
+ *
+ * <p>Used both as an embedded template inside {@code GerritCluster} and as the base class of
+ * {@link GerritSpec} for standalone {@code Gerrit} resources.
+ */
+public class GerritTemplateSpec {
+  // Name of the ServiceAccount the Gerrit pods run under; null means the default.
+  private String serviceAccount;
+
+  // Pod scheduling configuration; tolerations/affinity default to null (unset).
+  private List<Toleration> tolerations;
+  private Affinity affinity;
+  private List<TopologySpreadConstraint> topologySpreadConstraints = new ArrayList<>();
+  private String priorityClassName;
+
+  // Number of Gerrit pods; updatePartition is the StatefulSet rolling-update partition.
+  private int replicas = 1;
+  private int updatePartition = 0;
+
+  // Container resource requests/limits; null means unset.
+  private ResourceRequirements resources;
+
+  // Probes default to GerritProbe, which pins the action to Gerrit's healthcheck endpoint.
+  private GerritProbe startupProbe = new GerritProbe();
+  private GerritProbe readinessProbe = new GerritProbe();
+  private GerritProbe livenessProbe = new GerritProbe();
+
+  // Seconds granted for a graceful shutdown before the pod is killed.
+  private long gracefulStopTimeout = 30L;
+
+  private HttpSshServiceConfig service = new HttpSshServiceConfig();
+
+  private GerritSite site = new GerritSite();
+  // Plugins/libs/configFiles default to immutable empty collections (List.of()/Map.of());
+  // callers replace them wholesale via the setters rather than mutating in place.
+  private List<GerritPlugin> plugins = List.of();
+  private List<GerritModule> libs = List.of();
+  private Map<String, String> configFiles = Map.of();
+  // Name of a Secret with additional Gerrit configuration; null means none.
+  private String secretRef;
+  private GerritMode mode = GerritMode.PRIMARY;
+
+  private GerritDebugConfig debug = new GerritDebugConfig();
+
+  /** No-arg constructor required for Jackson deserialization. */
+  public GerritTemplateSpec() {}
+
+  /**
+   * Copy constructor. NOTE: this is a shallow copy — nested objects (probes, site, service,
+   * collections, ...) are shared with {@code templateSpec}, not cloned.
+   */
+  public GerritTemplateSpec(GerritTemplateSpec templateSpec) {
+    this.serviceAccount = templateSpec.serviceAccount;
+    this.tolerations = templateSpec.tolerations;
+    this.affinity = templateSpec.affinity;
+    this.topologySpreadConstraints = templateSpec.topologySpreadConstraints;
+    this.priorityClassName = templateSpec.priorityClassName;
+
+    this.replicas = templateSpec.replicas;
+    this.updatePartition = templateSpec.updatePartition;
+
+    this.resources = templateSpec.resources;
+
+    this.startupProbe = templateSpec.startupProbe;
+    this.readinessProbe = templateSpec.readinessProbe;
+    this.livenessProbe = templateSpec.livenessProbe;
+
+    this.gracefulStopTimeout = templateSpec.gracefulStopTimeout;
+
+    this.service = templateSpec.service;
+
+    this.site = templateSpec.site;
+    this.plugins = templateSpec.plugins;
+    this.libs = templateSpec.libs;
+    this.configFiles = templateSpec.configFiles;
+    this.secretRef = templateSpec.secretRef;
+    this.mode = templateSpec.mode;
+
+    this.debug = templateSpec.debug;
+  }
+
+  public String getServiceAccount() {
+    return serviceAccount;
+  }
+
+  public void setServiceAccount(String serviceAccount) {
+    this.serviceAccount = serviceAccount;
+  }
+
+  public List<Toleration> getTolerations() {
+    return tolerations;
+  }
+
+  public void setTolerations(List<Toleration> tolerations) {
+    this.tolerations = tolerations;
+  }
+
+  public Affinity getAffinity() {
+    return affinity;
+  }
+
+  public void setAffinity(Affinity affinity) {
+    this.affinity = affinity;
+  }
+
+  public List<TopologySpreadConstraint> getTopologySpreadConstraints() {
+    return topologySpreadConstraints;
+  }
+
+  public void setTopologySpreadConstraints(
+      List<TopologySpreadConstraint> topologySpreadConstraints) {
+    this.topologySpreadConstraints = topologySpreadConstraints;
+  }
+
+  public String getPriorityClassName() {
+    return priorityClassName;
+  }
+
+  public void setPriorityClassName(String priorityClassName) {
+    this.priorityClassName = priorityClassName;
+  }
+
+  public int getReplicas() {
+    return replicas;
+  }
+
+  public void setReplicas(int replicas) {
+    this.replicas = replicas;
+  }
+
+  public int getUpdatePartition() {
+    return updatePartition;
+  }
+
+  public void setUpdatePartition(int updatePartition) {
+    this.updatePartition = updatePartition;
+  }
+
+  public ResourceRequirements getResources() {
+    return resources;
+  }
+
+  public void setResources(ResourceRequirements resources) {
+    this.resources = resources;
+  }
+
+  public GerritProbe getStartupProbe() {
+    return startupProbe;
+  }
+
+  public void setStartupProbe(GerritProbe startupProbe) {
+    this.startupProbe = startupProbe;
+  }
+
+  public GerritProbe getReadinessProbe() {
+    return readinessProbe;
+  }
+
+  public void setReadinessProbe(GerritProbe readinessProbe) {
+    this.readinessProbe = readinessProbe;
+  }
+
+  public GerritProbe getLivenessProbe() {
+    return livenessProbe;
+  }
+
+  public void setLivenessProbe(GerritProbe livenessProbe) {
+    this.livenessProbe = livenessProbe;
+  }
+
+  public long getGracefulStopTimeout() {
+    return gracefulStopTimeout;
+  }
+
+  public void setGracefulStopTimeout(long gracefulStopTimeout) {
+    this.gracefulStopTimeout = gracefulStopTimeout;
+  }
+
+  public HttpSshServiceConfig getService() {
+    return service;
+  }
+
+  public void setService(HttpSshServiceConfig service) {
+    this.service = service;
+  }
+
+  public GerritSite getSite() {
+    return site;
+  }
+
+  public void setSite(GerritSite site) {
+    this.site = site;
+  }
+
+  public List<GerritPlugin> getPlugins() {
+    return plugins;
+  }
+
+  public void setPlugins(List<GerritPlugin> plugins) {
+    this.plugins = plugins;
+  }
+
+  public List<GerritModule> getLibs() {
+    return libs;
+  }
+
+  public void setLibs(List<GerritModule> libs) {
+    this.libs = libs;
+  }
+
+  public Map<String, String> getConfigFiles() {
+    return configFiles;
+  }
+
+  public void setConfigFiles(Map<String, String> configFiles) {
+    this.configFiles = configFiles;
+  }
+
+  public String getSecretRef() {
+    return secretRef;
+  }
+
+  public void setSecretRef(String secretRef) {
+    this.secretRef = secretRef;
+  }
+
+  public GerritMode getMode() {
+    return mode;
+  }
+
+  public void setMode(GerritMode mode) {
+    this.mode = mode;
+  }
+
+  public GerritDebugConfig getDebug() {
+    return debug;
+  }
+
+  public void setDebug(GerritDebugConfig debug) {
+    this.debug = debug;
+  }
+
+  /** Run mode of the Gerrit instance: read/write primary or read-only replica. */
+  public enum GerritMode {
+    PRIMARY,
+    REPLICA
+  }
+
+  /** True for a multi-replica primary, i.e. when a global ref-db is required. */
+  @JsonIgnore
+  public boolean isHighlyAvailablePrimary() {
+    return getMode().equals(GerritMode.PRIMARY) && getReplicas() > 1;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollection.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollection.java
new file mode 100644
index 0000000..7d268bd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollection.java
@@ -0,0 +1,43 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc;
+
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.Plural;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringStyle;
+
+@Group("gerritoperator.google.com")
+@Version("v1alpha1")
+@ShortNames("gitgc")
+@Plural("gitgcs")
+/**
+ * Custom resource scheduling git garbage collection for projects of a Gerrit cluster.
+ *
+ * <p>Namespaced CR in the {@code gerritoperator.google.com/v1alpha1} API group with
+ * short name {@code gitgc} and plural {@code gitgcs}.
+ */
+public class GitGarbageCollection
+    extends CustomResource<GitGarbageCollectionSpec, GitGarbageCollectionStatus>
+    implements Namespaced {
+  private static final long serialVersionUID = 1L;
+
+  /** Renders all fields reflectively as JSON; intended for logging/debug output. */
+  @Override
+  public String toString() {
+    return ToStringBuilder.reflectionToString(this, ToStringStyle.JSON_STYLE);
+  }
+
+  /** Supplies a non-null default status (state INACTIVE) for freshly created resources. */
+  @Override
+  protected GitGarbageCollectionStatus initStatus() {
+    return new GitGarbageCollectionStatus();
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollectionSpec.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollectionSpec.java
new file mode 100644
index 0000000..7648f13
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollectionSpec.java
@@ -0,0 +1,102 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc;
+
+import io.fabric8.kubernetes.api.model.Affinity;
+import io.fabric8.kubernetes.api.model.ResourceRequirements;
+import io.fabric8.kubernetes.api.model.Toleration;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+/**
+ * Spec of a {@link GitGarbageCollection}: which cluster and projects to collect, the cron
+ * schedule, and pod resources/scheduling for the GC job.
+ */
+public class GitGarbageCollectionSpec {
+  // Name of the GerritCluster whose repositories are collected.
+  private String cluster;
+  // Cron-style schedule for running the GC job.
+  private String schedule;
+  // Projects to collect; empty means the consumer decides the default scope.
+  private Set<String> projects;
+  private ResourceRequirements resources;
+  private List<Toleration> tolerations;
+  private Affinity affinity;
+
+  public GitGarbageCollectionSpec() {
+    resources = new ResourceRequirements();
+    projects = new HashSet<>();
+  }
+
+  public String getCluster() {
+    return cluster;
+  }
+
+  public void setCluster(String cluster) {
+    this.cluster = cluster;
+  }
+
+  public void setSchedule(String schedule) {
+    this.schedule = schedule;
+  }
+
+  public String getSchedule() {
+    return schedule;
+  }
+
+  public Set<String> getProjects() {
+    return projects;
+  }
+
+  public void setProjects(Set<String> projects) {
+    this.projects = projects;
+  }
+
+  public void setResources(ResourceRequirements resources) {
+    this.resources = resources;
+  }
+
+  public ResourceRequirements getResources() {
+    return resources;
+  }
+
+  public List<Toleration> getTolerations() {
+    return tolerations;
+  }
+
+  public void setTolerations(List<Toleration> tolerations) {
+    this.tolerations = tolerations;
+  }
+
+  public Affinity getAffinity() {
+    return affinity;
+  }
+
+  public void setAffinity(Affinity affinity) {
+    this.affinity = affinity;
+  }
+
+  // All fields participate in hashCode/equals. Previously tolerations and affinity
+  // were omitted, so two specs differing only in scheduling constraints compared
+  // equal — which would mask such changes from any equality-based change detection.
+  @Override
+  public int hashCode() {
+    return Objects.hash(affinity, cluster, projects, resources, schedule, tolerations);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof GitGarbageCollectionSpec) {
+      GitGarbageCollectionSpec other = (GitGarbageCollectionSpec) obj;
+      return Objects.equals(cluster, other.cluster)
+          && Objects.equals(projects, other.projects)
+          && Objects.equals(resources, other.resources)
+          && Objects.equals(schedule, other.schedule)
+          && Objects.equals(tolerations, other.tolerations)
+          && Objects.equals(affinity, other.affinity);
+    }
+    return false;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollectionStatus.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollectionStatus.java
new file mode 100644
index 0000000..ddff4c7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/gitgc/GitGarbageCollectionStatus.java
@@ -0,0 +1,76 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc;
+
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Set;
+
+/**
+ * Status of a {@link GitGarbageCollection} resource.
+ *
+ * <p>Tracks whether this GC covers all projects ({@code replicateAll}), which projects are
+ * excluded (e.g. because another GC already claims them), and the current state.
+ */
+public class GitGarbageCollectionStatus {
+  private boolean replicateAll = false;
+  private Set<String> excludedProjects = new HashSet<>();
+  private GitGcState state = GitGcState.INACTIVE;
+
+  /** Whether this GC applies to all projects rather than an explicit list. */
+  public boolean isReplicateAll() {
+    return this.replicateAll;
+  }
+
+  public void setReplicateAll(boolean replicateAll) {
+    this.replicateAll = replicateAll;
+  }
+
+  /** Projects currently excluded from this garbage collection. */
+  public Set<String> getExcludedProjects() {
+    return this.excludedProjects;
+  }
+
+  /** Clears the exclusion list by replacing it with a fresh empty set. */
+  public void resetExcludedProjects() {
+    this.excludedProjects = new HashSet<>();
+  }
+
+  /** Adds all given projects to the exclusion list. */
+  public void excludeProjects(Set<String> projects) {
+    this.excludedProjects.addAll(projects);
+  }
+
+  public GitGcState getState() {
+    return this.state;
+  }
+
+  public void setState(GitGcState state) {
+    this.state = state;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(excludedProjects, replicateAll, state);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof GitGarbageCollectionStatus)) {
+      return false;
+    }
+    GitGarbageCollectionStatus that = (GitGarbageCollectionStatus) obj;
+    return this.replicateAll == that.replicateAll
+        && this.state == that.state
+        && Objects.equals(this.excludedProjects, that.excludedProjects);
+  }
+
+  /** Lifecycle state of the garbage collection. */
+  public enum GitGcState {
+    ACTIVE,
+    INACTIVE,
+    CONFLICT,
+    ERROR
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/GerritNetwork.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/GerritNetwork.java
new file mode 100644
index 0000000..7596f54
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/GerritNetwork.java
@@ -0,0 +1,58 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.network;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.api.model.Status;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+
+@Group("gerritoperator.google.com")
+@Version("v1alpha2")
+@ShortNames("gn")
+/**
+ * Custom resource describing the network exposure of the Gerrit instances and receiver in
+ * a namespace. Namespaced CR in {@code gerritoperator.google.com/v1alpha2}, short name
+ * {@code gn}.
+ */
+public class GerritNetwork extends CustomResource<GerritNetworkSpec, Status> implements Namespaced {
+  private static final long serialVersionUID = 1L;
+
+  // Session-affinity cookie settings — presumably consumed by the ingress/mesh
+  // dependent resources; confirm against the resources that reference them.
+  public static final String SESSION_COOKIE_NAME = "Gerrit_Session";
+  public static final String SESSION_COOKIE_TTL = "60s";
+
+  /** Builds the name of a dependent resource as "&lt;cr-name&gt;-&lt;nameSuffix&gt;". */
+  @JsonIgnore
+  public String getDependentResourceName(String nameSuffix) {
+    return String.format("%s-%s", getMetadata().getName(), nameSuffix);
+  }
+
+  @JsonIgnore
+  public boolean hasPrimaryGerrit() {
+    return getSpec().getPrimaryGerrit() != null;
+  }
+
+  @JsonIgnore
+  public boolean hasGerritReplica() {
+    return getSpec().getGerritReplica() != null;
+  }
+
+  /** True if the network contains at least one Gerrit (primary or replica). */
+  @JsonIgnore
+  public boolean hasGerrits() {
+    return hasGerritReplica() || hasPrimaryGerrit();
+  }
+
+  @JsonIgnore
+  public boolean hasReceiver() {
+    return getSpec().getReceiver() != null;
+  }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/GerritNetworkSpec.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/GerritNetworkSpec.java
new file mode 100644
index 0000000..406341f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/GerritNetworkSpec.java
@@ -0,0 +1,69 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.network;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritClusterIngressConfig;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GerritNetworkSpec { // spec of GerritNetwork: ingress config plus the optional network members
+ private GerritClusterIngressConfig ingress = new GerritClusterIngressConfig(); // never null; defaults to disabled ingress
+ private NetworkMember receiver; // optional; null when no receiver is part of the network
+ private NetworkMemberWithSsh primaryGerrit; // optional primary (read/write) Gerrit
+ private NetworkMemberWithSsh gerritReplica; // optional read-only Gerrit replica
+
+ public GerritClusterIngressConfig getIngress() {
+ return ingress;
+ }
+
+ public void setIngress(GerritClusterIngressConfig ingress) {
+ this.ingress = ingress;
+ }
+
+ public NetworkMember getReceiver() {
+ return receiver;
+ }
+
+ public void setReceiver(NetworkMember receiver) {
+ this.receiver = receiver;
+ }
+
+ public NetworkMemberWithSsh getPrimaryGerrit() {
+ return primaryGerrit;
+ }
+
+ public void setPrimaryGerrit(NetworkMemberWithSsh primaryGerrit) {
+ this.primaryGerrit = primaryGerrit;
+ }
+
+ public NetworkMemberWithSsh getGerritReplica() {
+ return gerritReplica;
+ }
+
+ public void setGerritReplica(NetworkMemberWithSsh gerritReplica) {
+ this.gerritReplica = gerritReplica;
+ }
+
+ public List<NetworkMemberWithSsh> getGerrits() { // all configured Gerrits, primary first; empty list when neither is set
+ List<NetworkMemberWithSsh> gerrits = new ArrayList<>();
+ if (primaryGerrit != null) {
+ gerrits.add(primaryGerrit);
+ }
+ if (gerritReplica != null) {
+ gerrits.add(gerritReplica);
+ }
+ return gerrits;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/NetworkMember.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/NetworkMember.java
new file mode 100644
index 0000000..433dbbe
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/NetworkMember.java
@@ -0,0 +1,45 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.network;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.HttpServiceConfig;
+
+public class NetworkMember { // one HTTP-serving member of a GerritNetwork, identified by service name and port
+ private String name; // name of the member's Kubernetes service; no default
+ private int httpPort = 8080; // HTTP port of the member's service
+
+ public NetworkMember() {} // no-arg constructor required for Jackson deserialization
+
+ public NetworkMember(String name, HttpServiceConfig serviceConfig) { // convenience: take the port from an existing service config
+ this.name = name;
+ this.httpPort = serviceConfig.getHttpPort();
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public int getHttpPort() {
+ return httpPort;
+ }
+
+ public void setHttpPort(int httpPort) {
+ this.httpPort = httpPort;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/NetworkMemberWithSsh.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/NetworkMemberWithSsh.java
new file mode 100644
index 0000000..1668739
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/network/NetworkMemberWithSsh.java
@@ -0,0 +1,36 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.network;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.HttpSshServiceConfig;
+
+public class NetworkMemberWithSsh extends NetworkMember { // network member that additionally exposes SSH
+ private int sshPort = 29418; // default matches Gerrit's conventional SSH port
+
+ public NetworkMemberWithSsh() {} // no-arg constructor required for Jackson deserialization
+
+ public NetworkMemberWithSsh(String name, HttpSshServiceConfig serviceConfig) { // take both HTTP and SSH ports from an existing service config
+ super(name, serviceConfig);
+ this.sshPort = serviceConfig.getSshPort();
+ }
+
+ public int getSshPort() {
+ return sshPort;
+ }
+
+ public void setSshPort(int sshPort) {
+ this.sshPort = sshPort;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/Receiver.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/Receiver.java
new file mode 100644
index 0000000..bc546df
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/Receiver.java
@@ -0,0 +1,34 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.receiver;
+
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringStyle;
+
+@Group("gerritoperator.google.com") // Kubernetes API group of this custom resource
+@Version("v1alpha6") // CRD version of the Receiver resource
+@ShortNames("grec") // kubectl short name
+public class Receiver extends CustomResource<ReceiverSpec, ReceiverStatus> implements Namespaced { // namespaced CR for a git receiver deployment
+ private static final long serialVersionUID = 1L;
+
+ public String toString() { // NOTE(review): missing @Override; renders all fields reflectively as JSON-style text
+ return ToStringBuilder.reflectionToString(this, ToStringStyle.JSON_STYLE);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverProbe.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverProbe.java
new file mode 100644
index 0000000..78aaa58
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverProbe.java
@@ -0,0 +1,78 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.receiver;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.gerrit.k8s.operator.receiver.dependent.ReceiverDeployment;
+import io.fabric8.kubernetes.api.model.ExecAction;
+import io.fabric8.kubernetes.api.model.GRPCAction;
+import io.fabric8.kubernetes.api.model.HTTPGetAction;
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Probe;
+import io.fabric8.kubernetes.api.model.TCPSocketAction;
+import io.fabric8.kubernetes.api.model.TCPSocketActionBuilder;
+
+public class ReceiverProbe extends Probe { // Probe whose action is pinned to a TCP check on the receiver's HTTP port
+ private static final long serialVersionUID = 1L;
+
+ private static final TCPSocketAction TCP_SOCKET_ACTION =
+ new TCPSocketActionBuilder().withPort(new IntOrString(ReceiverDeployment.HTTP_PORT)).build(); // the one and only probe action ever reported
+
+ @JsonIgnore private ExecAction exec; // shadows the parent field so Jackson never (de)serializes it
+
+ @JsonIgnore private GRPCAction grpc; // shadows the parent field so Jackson never (de)serializes it
+
+ @JsonIgnore private TCPSocketAction tcpSocket; // shadows the parent field so Jackson never (de)serializes it
+
+ @Override
+ public void setExec(ExecAction exec) { // deliberately discards the caller's value — exec probes are not supported
+ super.setExec(null);
+ }
+
+ @Override
+ public void setGrpc(GRPCAction grpc) { // deliberately discards the caller's value — gRPC probes are not supported
+ super.setGrpc(null);
+ }
+
+ @Override
+ public void setHttpGet(HTTPGetAction httpGet) { // deliberately discards the caller's value — HTTP probes are not supported
+ super.setHttpGet(null);
+ }
+
+ @Override
+ public void setTcpSocket(TCPSocketAction tcpSocket) { // ignores the argument and always installs the fixed TCP action
+ super.setTcpSocket(TCP_SOCKET_ACTION);
+ }
+
+ @Override
+ public ExecAction getExec() {
+ return null;
+ }
+
+ @Override
+ public GRPCAction getGrpc() {
+ return null;
+ }
+
+ @Override
+ public HTTPGetAction getHttpGet() {
+ return null;
+ }
+
+ @Override
+ public TCPSocketAction getTcpSocket() { // always the fixed TCP check, regardless of what was set
+ return TCP_SOCKET_ACTION;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverSpec.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverSpec.java
new file mode 100644
index 0000000..005fa49
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverSpec.java
@@ -0,0 +1,55 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.receiver;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.ContainerImageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.IngressConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.StorageConfig;
+
+public class ReceiverSpec extends ReceiverTemplateSpec { // full Receiver spec: template fields plus cluster-scoped config filled in by the operator
+ private ContainerImageConfig containerImages = new ContainerImageConfig(); // image registry/pull settings; defaults applied if absent
+ private StorageConfig storage = new StorageConfig(); // shared storage configuration
+ private IngressConfig ingress = new IngressConfig(); // host/TLS settings for the receiver's ingress
+
+ public ReceiverSpec() {} // no-arg constructor required for Jackson deserialization
+
+ public ReceiverSpec(ReceiverTemplateSpec templateSpec) { // copy template fields; cluster-scoped fields keep their defaults until set
+ super(templateSpec);
+ }
+
+ public ContainerImageConfig getContainerImages() {
+ return containerImages;
+ }
+
+ public void setContainerImages(ContainerImageConfig containerImages) {
+ this.containerImages = containerImages;
+ }
+
+ public StorageConfig getStorage() {
+ return storage;
+ }
+
+ public void setStorage(StorageConfig storage) {
+ this.storage = storage;
+ }
+
+ public IngressConfig getIngress() {
+ return ingress;
+ }
+
+ public void setIngress(IngressConfig ingress) {
+ this.ingress = ingress;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverStatus.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverStatus.java
new file mode 100644
index 0000000..915a62f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverStatus.java
@@ -0,0 +1,36 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.receiver;
+
+public class ReceiverStatus { // status subresource of the Receiver CR
+ private boolean ready; // whether the receiver is considered ready; defaults to false
+ private String appliedCredentialSecretVersion = ""; // resource version of the credential secret last applied; empty until first applied
+
+ public boolean isReady() {
+ return ready;
+ }
+
+ public void setReady(boolean ready) {
+ this.ready = ready;
+ }
+
+ public String getAppliedCredentialSecretVersion() {
+ return appliedCredentialSecretVersion;
+ }
+
+ public void setAppliedCredentialSecretVersion(String appliedCredentialSecretVersion) {
+ this.appliedCredentialSecretVersion = appliedCredentialSecretVersion;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverTemplate.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverTemplate.java
new file mode 100644
index 0000000..f559419
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverTemplate.java
@@ -0,0 +1,86 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.receiver;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.IngressConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.StorageConfig;
+import io.fabric8.kubernetes.api.model.KubernetesResource;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+
+@JsonDeserialize(using = com.fasterxml.jackson.databind.JsonDeserializer.None.class) // force default bean deserialization (overrides any inherited custom deserializer)
+@JsonInclude(JsonInclude.Include.NON_NULL) // omit null fields when serializing
+@JsonPropertyOrder({"metadata", "spec"}) // stable field order for serialized output
+public class ReceiverTemplate implements KubernetesResource { // embedded template (metadata + spec) used inside a GerritCluster to stamp out a Receiver CR
+ private static final long serialVersionUID = 1L;
+
+ @JsonProperty("metadata")
+ private ObjectMeta metadata; // name/labels for the Receiver to be created; namespace is taken from the cluster
+
+ @JsonProperty("spec")
+ private ReceiverTemplateSpec spec; // receiver-local settings; cluster-scoped settings are merged in by toReceiver
+
+ public ReceiverTemplate() {} // no-arg constructor required for Jackson deserialization
+
+ @JsonProperty("metadata")
+ public ObjectMeta getMetadata() {
+ return metadata;
+ }
+
+ @JsonProperty("metadata")
+ public void setMetadata(ObjectMeta metadata) {
+ this.metadata = metadata;
+ }
+
+ @JsonProperty("spec")
+ public ReceiverTemplateSpec getSpec() {
+ return spec;
+ }
+
+ @JsonProperty("spec")
+ public void setSpec(ReceiverTemplateSpec spec) {
+ this.spec = spec;
+ }
+
+ @JsonIgnore
+ public Receiver toReceiver(GerritCluster gerritCluster) { // materialize a standalone Receiver CR, inheriting images/storage/ingress from the cluster
+ Receiver receiver = new Receiver();
+ receiver.setMetadata(getReceiverMetadata(gerritCluster));
+ ReceiverSpec receiverSpec = new ReceiverSpec(spec);
+ receiverSpec.setContainerImages(gerritCluster.getSpec().getContainerImages());
+ receiverSpec.setStorage(new StorageConfig(gerritCluster.getSpec().getStorage()));
+ IngressConfig ingressConfig = new IngressConfig();
+ ingressConfig.setHost(gerritCluster.getSpec().getIngress().getHost()); // only host and TLS-enabled are carried over from the cluster ingress
+ ingressConfig.setTlsEnabled(gerritCluster.getSpec().getIngress().getTls().isEnabled());
+ receiverSpec.setIngress(ingressConfig);
+ receiver.setSpec(receiverSpec);
+ return receiver;
+ }
+
+ @JsonIgnore
+ private ObjectMeta getReceiverMetadata(GerritCluster gerritCluster) { // template's name/labels, placed in the cluster's namespace; NPE if metadata unset — assumes validated input
+ return new ObjectMetaBuilder()
+ .withName(metadata.getName())
+ .withLabels(metadata.getLabels())
+ .withNamespace(gerritCluster.getMetadata().getNamespace())
+ .build();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverTemplateSpec.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverTemplateSpec.java
new file mode 100644
index 0000000..22ee904
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/receiver/ReceiverTemplateSpec.java
@@ -0,0 +1,163 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.receiver;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.HttpServiceConfig;
+import io.fabric8.kubernetes.api.model.Affinity;
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.ResourceRequirements;
+import io.fabric8.kubernetes.api.model.Toleration;
+import io.fabric8.kubernetes.api.model.TopologySpreadConstraint;
+import java.util.ArrayList;
+import java.util.List;
+
+public class ReceiverTemplateSpec { // deployment-shaped knobs for the receiver: scheduling, scaling, probes, service, credentials
+ private List<Toleration> tolerations = new ArrayList<>(); // pod tolerations; empty by default
+ private Affinity affinity; // optional pod affinity rules
+ private List<TopologySpreadConstraint> topologySpreadConstraints = new ArrayList<>(); // empty by default
+ private String priorityClassName; // optional pod priority class
+
+ private int replicas = 1; // number of receiver pods
+ private IntOrString maxSurge = new IntOrString(1); // rolling-update surge (count or percentage)
+ private IntOrString maxUnavailable = new IntOrString(1); // rolling-update unavailability (count or percentage)
+
+ private ResourceRequirements resources; // optional container resource requests/limits
+
+ private ReceiverProbe readinessProbe = new ReceiverProbe(); // action fixed to TCP check by ReceiverProbe
+ private ReceiverProbe livenessProbe = new ReceiverProbe(); // action fixed to TCP check by ReceiverProbe
+
+ private HttpServiceConfig service = new HttpServiceConfig(); // HTTP service settings for the receiver
+
+ private String credentialSecretRef; // name of the secret holding receiver credentials
+
+ public ReceiverTemplateSpec() {} // no-arg constructor required for Jackson deserialization
+
+ public ReceiverTemplateSpec(ReceiverTemplateSpec templateSpec) { // SHALLOW copy: lists and nested objects are shared with the source, not cloned
+ this.tolerations = templateSpec.tolerations;
+ this.affinity = templateSpec.affinity;
+ this.topologySpreadConstraints = templateSpec.topologySpreadConstraints;
+ this.priorityClassName = templateSpec.priorityClassName;
+
+ this.replicas = templateSpec.replicas;
+
+ this.resources = templateSpec.resources;
+ this.maxSurge = templateSpec.maxSurge;
+ this.maxUnavailable = templateSpec.maxUnavailable;
+
+ this.readinessProbe = templateSpec.readinessProbe;
+ this.livenessProbe = templateSpec.livenessProbe;
+
+ this.service = templateSpec.service;
+
+ this.credentialSecretRef = templateSpec.credentialSecretRef;
+ }
+
+ public List<Toleration> getTolerations() {
+ return tolerations;
+ }
+
+ public void setTolerations(List<Toleration> tolerations) {
+ this.tolerations = tolerations;
+ }
+
+ public Affinity getAffinity() {
+ return affinity;
+ }
+
+ public void setAffinity(Affinity affinity) {
+ this.affinity = affinity;
+ }
+
+ public List<TopologySpreadConstraint> getTopologySpreadConstraints() {
+ return topologySpreadConstraints;
+ }
+
+ public void setTopologySpreadConstraints(
+ List<TopologySpreadConstraint> topologySpreadConstraints) {
+ this.topologySpreadConstraints = topologySpreadConstraints;
+ }
+
+ public String getPriorityClassName() {
+ return priorityClassName;
+ }
+
+ public void setPriorityClassName(String priorityClassName) {
+ this.priorityClassName = priorityClassName;
+ }
+
+ public int getReplicas() {
+ return replicas;
+ }
+
+ public void setReplicas(int replicas) {
+ this.replicas = replicas;
+ }
+
+ public IntOrString getMaxSurge() {
+ return maxSurge;
+ }
+
+ public void setMaxSurge(IntOrString maxSurge) {
+ this.maxSurge = maxSurge;
+ }
+
+ public IntOrString getMaxUnavailable() {
+ return maxUnavailable;
+ }
+
+ public void setMaxUnavailable(IntOrString maxUnavailable) {
+ this.maxUnavailable = maxUnavailable;
+ }
+
+ public ResourceRequirements getResources() {
+ return resources;
+ }
+
+ public void setResources(ResourceRequirements resources) {
+ this.resources = resources;
+ }
+
+ public ReceiverProbe getReadinessProbe() {
+ return readinessProbe;
+ }
+
+ public void setReadinessProbe(ReceiverProbe readinessProbe) {
+ this.readinessProbe = readinessProbe;
+ }
+
+ public ReceiverProbe getLivenessProbe() {
+ return livenessProbe;
+ }
+
+ public void setLivenessProbe(ReceiverProbe livenessProbe) {
+ this.livenessProbe = livenessProbe;
+ }
+
+ public HttpServiceConfig getService() {
+ return service;
+ }
+
+ public void setService(HttpServiceConfig service) {
+ this.service = service;
+ }
+
+ public String getCredentialSecretRef() {
+ return credentialSecretRef;
+ }
+
+ public void setCredentialSecretRef(String credentialSecretRef) {
+ this.credentialSecretRef = credentialSecretRef;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/BusyBoxImage.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/BusyBoxImage.java
new file mode 100644
index 0000000..5c5388f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/BusyBoxImage.java
@@ -0,0 +1,62 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+
+public class BusyBoxImage { // registry/tag selection for the busybox helper image
+ private String registry; // defaults to "docker.io"; may be set to null to omit the registry prefix
+ private String tag; // defaults to "latest"; may be set to null to omit the tag
+
+ public BusyBoxImage() {
+ this.registry = "docker.io";
+ this.tag = "latest";
+ }
+
+ public void setRegistry(String registry) {
+ this.registry = registry;
+ }
+
+ public String getRegistry() {
+ return registry;
+ }
+
+ public void setTag(String tag) {
+ this.tag = tag;
+ }
+
+ public String getTag() {
+ return tag;
+ }
+
+ @JsonIgnore
+ public String getBusyBoxImage() { // full image reference, e.g. "docker.io/busybox:latest"; registry and tag parts are optional
+ StringBuilder builder = new StringBuilder();
+
+ if (registry != null) {
+ builder.append(registry);
+ builder.append("/");
+ }
+
+ builder.append("busybox"); // image name is fixed
+
+ if (tag != null) {
+ builder.append(":");
+ builder.append(tag);
+ }
+
+ return builder.toString();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/ContainerImageConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/ContainerImageConfig.java
new file mode 100644
index 0000000..4f84824
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/ContainerImageConfig.java
@@ -0,0 +1,58 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import io.fabric8.kubernetes.api.model.LocalObjectReference;
+import java.util.HashSet;
+import java.util.Set;
+
+public class ContainerImageConfig { // container image settings shared across operator-managed workloads
+ private String imagePullPolicy = "Always"; // Kubernetes imagePullPolicy value applied to containers
+ private Set<LocalObjectReference> imagePullSecrets = new HashSet<>(); // secrets for pulling from private registries; empty by default
+ private BusyBoxImage busyBox = new BusyBoxImage(); // helper image; defaults to docker.io/busybox:latest
+ private GerritRepositoryConfig gerritImages = new GerritRepositoryConfig(); // registry/org/tag config for the Gerrit images
+
+ public String getImagePullPolicy() {
+ return imagePullPolicy;
+ }
+
+ public void setImagePullPolicy(String imagePullPolicy) {
+ this.imagePullPolicy = imagePullPolicy;
+ }
+
+ public Set<LocalObjectReference> getImagePullSecrets() {
+ return imagePullSecrets;
+ }
+
+ public void setImagePullSecrets(Set<LocalObjectReference> imagePullSecrets) {
+ this.imagePullSecrets = imagePullSecrets;
+ }
+
+ public BusyBoxImage getBusyBox() {
+ return busyBox;
+ }
+
+ public void setBusyBox(BusyBoxImage busyBox) {
+ this.busyBox = busyBox;
+ }
+
+ public GerritRepositoryConfig getGerritImages() {
+ return gerritImages;
+ }
+
+ public void setGerritImages(GerritRepositoryConfig gerritImages) {
+ this.gerritImages = gerritImages;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritClusterIngressConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritClusterIngressConfig.java
new file mode 100644
index 0000000..a915820
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritClusterIngressConfig.java
@@ -0,0 +1,85 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Map;
+
+public class GerritClusterIngressConfig { // cluster-wide ingress settings: host, TLS, SSH exposure, Ambassador options
+ private boolean enabled = false; // ingress creation is opt-in
+ private String host; // base hostname; service hostnames are derived from it (see getFullHostnameForService)
+ private Map<String, String> annotations; // optional extra annotations for the ingress resource; null when unset
+ private GerritIngressTlsConfig tls = new GerritIngressTlsConfig();
+ private GerritIngressSshConfig ssh = new GerritIngressSshConfig();
+ private GerritIngressAmbassadorConfig ambassador = new GerritIngressAmbassadorConfig();
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public void setHost(String host) {
+ this.host = host;
+ }
+
+ public Map<String, String> getAnnotations() {
+ return annotations;
+ }
+
+ public void setAnnotations(Map<String, String> annotations) {
+ this.annotations = annotations;
+ }
+
+ public GerritIngressTlsConfig getTls() {
+ return tls;
+ }
+
+ public void setTls(GerritIngressTlsConfig tls) {
+ this.tls = tls;
+ }
+
+ public GerritIngressSshConfig getSsh() {
+ return ssh;
+ }
+
+ public void setSsh(GerritIngressSshConfig ssh) {
+ this.ssh = ssh;
+ }
+
+ public GerritIngressAmbassadorConfig getAmbassador() {
+ return ambassador;
+ }
+
+ public void setAmbassador(GerritIngressAmbassadorConfig ambassador) {
+ this.ambassador = ambassador;
+ }
+
+ @JsonIgnore
+ public String getFullHostnameForService(String svcName) { // "<svcName>.<host>" using this config's host
+ return getFullHostnameForService(svcName, getHost());
+ }
+
+ @JsonIgnore
+ public static String getFullHostnameForService(String svcName, String ingressHost) { // "<svcName>.<ingressHost>"; no validation of either part
+ return String.format("%s.%s", svcName, ingressHost);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressAmbassadorConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressAmbassadorConfig.java
new file mode 100644
index 0000000..b34eb67
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressAmbassadorConfig.java
@@ -0,0 +1,38 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import java.util.List;
+
+public class GerritIngressAmbassadorConfig { // Ambassador-specific ingress options
+ private List<String> id; // ambassador_id values; null when unset — presumably selects the Ambassador installation, confirm against consumer
+ private boolean createHost; // whether to create an Ambassador Host resource; defaults to false
+
+ public List<String> getId() {
+ return this.id;
+ }
+
+ public void setId(List<String> id) {
+ this.id = id;
+ }
+
+ public boolean getCreateHost() { // NOTE(review): non-standard accessor name for a boolean (isCreateHost is conventional)
+ return this.createHost;
+ }
+
+ public void setCreateHost(boolean createHost) {
+ this.createHost = createHost;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressSshConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressSshConfig.java
new file mode 100644
index 0000000..6203803
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressSshConfig.java
@@ -0,0 +1,27 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class GerritIngressSshConfig {
+ private boolean enabled = false;
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressTlsConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressTlsConfig.java
new file mode 100644
index 0000000..b4552f5
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritIngressTlsConfig.java
@@ -0,0 +1,37 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class GerritIngressTlsConfig {
+
+ private boolean enabled = false;
+ private String secret;
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public String getSecret() {
+ return secret;
+ }
+
+ public void setSecret(String secret) {
+ this.secret = secret;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritRepositoryConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritRepositoryConfig.java
new file mode 100644
index 0000000..f593c1e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritRepositoryConfig.java
@@ -0,0 +1,77 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+
+public class GerritRepositoryConfig {
+ private String registry;
+ private String org;
+ private String tag;
+
+ public GerritRepositoryConfig() {
+ this.registry = "docker.io";
+ this.org = "k8sgerrit";
+ this.tag = "latest";
+ }
+
+ public void setRegistry(String registry) {
+ this.registry = registry;
+ }
+
+ public String getRegistry() {
+ return registry;
+ }
+
+ public String getOrg() {
+ return org;
+ }
+
+ public void setOrg(String org) {
+ this.org = org;
+ }
+
+ public void setTag(String tag) {
+ this.tag = tag;
+ }
+
+ public String getTag() {
+ return tag;
+ }
+
+ @JsonIgnore
+ public String getFullImageName(String image) {
+ StringBuilder builder = new StringBuilder();
+
+ if (registry != null) {
+ builder.append(registry);
+ builder.append("/");
+ }
+
+ if (org != null) {
+ builder.append(org);
+ builder.append("/");
+ }
+
+ builder.append(image);
+
+ if (tag != null) {
+ builder.append(":");
+ builder.append(tag);
+ }
+
+ return builder.toString();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritStorageConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritStorageConfig.java
new file mode 100644
index 0000000..d66a0cd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GerritStorageConfig.java
@@ -0,0 +1,39 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class GerritStorageConfig extends StorageConfig {
+ private PluginCacheConfig pluginCache = new PluginCacheConfig();
+
+ public PluginCacheConfig getPluginCache() {
+ return pluginCache;
+ }
+
+ public void setPluginCache(PluginCacheConfig pluginCache) {
+ this.pluginCache = pluginCache;
+ }
+
+  public static class PluginCacheConfig {
+ private boolean enabled;
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GlobalRefDbConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GlobalRefDbConfig.java
new file mode 100644
index 0000000..d338555
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/GlobalRefDbConfig.java
@@ -0,0 +1,51 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class GlobalRefDbConfig {
+ private RefDatabase database = RefDatabase.NONE;
+ private ZookeeperRefDbConfig zookeeper;
+ private SpannerRefDbConfig spanner;
+
+ public RefDatabase getDatabase() {
+ return database;
+ }
+
+ public void setDatabase(RefDatabase database) {
+ this.database = database;
+ }
+
+ public ZookeeperRefDbConfig getZookeeper() {
+ return zookeeper;
+ }
+
+ public void setZookeeper(ZookeeperRefDbConfig zookeeper) {
+ this.zookeeper = zookeeper;
+ }
+
+ public SpannerRefDbConfig getSpanner() {
+ return spanner;
+ }
+
+ public void setSpanner(SpannerRefDbConfig spanner) {
+ this.spanner = spanner;
+ }
+
+ public enum RefDatabase {
+ NONE,
+ ZOOKEEPER,
+ SPANNER,
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/HttpServiceConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/HttpServiceConfig.java
new file mode 100644
index 0000000..8e7d651
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/HttpServiceConfig.java
@@ -0,0 +1,40 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import java.io.Serializable;
+
+public class HttpServiceConfig implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ String type = "NodePort";
+ int httpPort = 80;
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public int getHttpPort() {
+ return httpPort;
+ }
+
+ public void setHttpPort(int httpPort) {
+ this.httpPort = httpPort;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/HttpSshServiceConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/HttpSshServiceConfig.java
new file mode 100644
index 0000000..8655c32
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/HttpSshServiceConfig.java
@@ -0,0 +1,31 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import java.io.Serializable;
+
+public class HttpSshServiceConfig extends HttpServiceConfig implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ int sshPort = 0;
+
+ public int getSshPort() {
+ return sshPort;
+ }
+
+ public void setSshPort(int sshPort) {
+ this.sshPort = sshPort;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/IngressConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/IngressConfig.java
new file mode 100644
index 0000000..f83b189
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/IngressConfig.java
@@ -0,0 +1,75 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+
+public class IngressConfig {
+ private boolean enabled;
+ private String host;
+ private boolean tlsEnabled;
+ private GerritIngressSshConfig ssh = new GerritIngressSshConfig();
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public void setHost(String host) {
+ this.host = host;
+ }
+
+ public boolean isTlsEnabled() {
+ return tlsEnabled;
+ }
+
+ public void setTlsEnabled(boolean tlsEnabled) {
+ this.tlsEnabled = tlsEnabled;
+ }
+
+ public GerritIngressSshConfig getSsh() {
+ return ssh;
+ }
+
+ public void setSsh(GerritIngressSshConfig ssh) {
+ this.ssh = ssh;
+ }
+
+ @JsonIgnore
+ public String getFullHostnameForService(String svcName) {
+ return String.format("%s.%s", svcName, getHost());
+ }
+
+ @JsonIgnore
+ public String getUrl() {
+ String protocol = isTlsEnabled() ? "https" : "http";
+ String hostname = getHost();
+ return String.format("%s://%s", protocol, hostname);
+ }
+
+ @JsonIgnore
+ public String getSshUrl() {
+    String hostname = getHost();
+    // SSH endpoints use the ssh scheme regardless of TLS termination on HTTP.
+    return String.format("ssh://%s", hostname);
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/NfsWorkaroundConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/NfsWorkaroundConfig.java
new file mode 100644
index 0000000..ed4a4d1
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/NfsWorkaroundConfig.java
@@ -0,0 +1,46 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class NfsWorkaroundConfig {
+
+ private boolean enabled = false;
+ private boolean chownOnStartup = false;
+ private String idmapdConfig;
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public boolean isChownOnStartup() {
+ return chownOnStartup;
+ }
+
+ public void setChownOnStartup(boolean chownOnStartup) {
+ this.chownOnStartup = chownOnStartup;
+ }
+
+ public String getIdmapdConfig() {
+ return idmapdConfig;
+ }
+
+ public void setIdmapdConfig(String idmapdConfig) {
+ this.idmapdConfig = idmapdConfig;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/SharedStorage.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/SharedStorage.java
new file mode 100644
index 0000000..d2193c1
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/SharedStorage.java
@@ -0,0 +1,78 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+import io.fabric8.kubernetes.api.model.LabelSelector;
+import io.fabric8.kubernetes.api.model.Quantity;
+
+public class SharedStorage {
+ private ExternalPVCConfig externalPVC = new ExternalPVCConfig();
+ private Quantity size;
+ private String volumeName;
+ private LabelSelector selector;
+
+ public ExternalPVCConfig getExternalPVC() {
+ return externalPVC;
+ }
+
+ public void setExternalPVC(ExternalPVCConfig externalPVC) {
+ this.externalPVC = externalPVC;
+ }
+
+ public Quantity getSize() {
+ return size;
+ }
+
+ public String getVolumeName() {
+ return volumeName;
+ }
+
+ public void setSize(Quantity size) {
+ this.size = size;
+ }
+
+ public void setVolumeName(String volumeName) {
+ this.volumeName = volumeName;
+ }
+
+ public LabelSelector getSelector() {
+ return selector;
+ }
+
+ public void setSelector(LabelSelector selector) {
+ this.selector = selector;
+ }
+
+  public static class ExternalPVCConfig {
+ private boolean enabled;
+ private String claimName = "";
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public String getClaimName() {
+ return claimName;
+ }
+
+ public void setClaimName(String claimName) {
+ this.claimName = claimName;
+ }
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/SpannerRefDbConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/SpannerRefDbConfig.java
new file mode 100644
index 0000000..eee7eab
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/SpannerRefDbConfig.java
@@ -0,0 +1,45 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class SpannerRefDbConfig {
+ private String projectName;
+ private String instance;
+ private String database;
+
+ public String getProjectName() {
+ return projectName;
+ }
+
+ public void setProjectName(String projectName) {
+ this.projectName = projectName;
+ }
+
+ public String getInstance() {
+ return instance;
+ }
+
+ public void setInstance(String instance) {
+ this.instance = instance;
+ }
+
+ public String getDatabase() {
+ return database;
+ }
+
+ public void setDatabase(String database) {
+ this.database = database;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/StorageClassConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/StorageClassConfig.java
new file mode 100644
index 0000000..de4906b
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/StorageClassConfig.java
@@ -0,0 +1,46 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class StorageClassConfig {
+
+ String readWriteOnce = "default";
+ String readWriteMany = "shared-storage";
+ NfsWorkaroundConfig nfsWorkaround = new NfsWorkaroundConfig();
+
+ public String getReadWriteOnce() {
+ return readWriteOnce;
+ }
+
+ public String getReadWriteMany() {
+ return readWriteMany;
+ }
+
+ public void setReadWriteOnce(String readWriteOnce) {
+ this.readWriteOnce = readWriteOnce;
+ }
+
+ public void setReadWriteMany(String readWriteMany) {
+ this.readWriteMany = readWriteMany;
+ }
+
+ public NfsWorkaroundConfig getNfsWorkaround() {
+ return nfsWorkaround;
+ }
+
+ public void setNfsWorkaround(NfsWorkaroundConfig nfsWorkaround) {
+ this.nfsWorkaround = nfsWorkaround;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/StorageConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/StorageConfig.java
new file mode 100644
index 0000000..566ce6e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/StorageConfig.java
@@ -0,0 +1,44 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class StorageConfig {
+
+ private StorageClassConfig storageClasses;
+ private SharedStorage sharedStorage;
+
+ public StorageConfig() {}
+
+ public StorageConfig(GerritStorageConfig gerritStorageConfig) {
+ storageClasses = gerritStorageConfig.getStorageClasses();
+ sharedStorage = gerritStorageConfig.getSharedStorage();
+ }
+
+ public StorageClassConfig getStorageClasses() {
+ return storageClasses;
+ }
+
+ public void setStorageClasses(StorageClassConfig storageClasses) {
+ this.storageClasses = storageClasses;
+ }
+
+ public SharedStorage getSharedStorage() {
+ return sharedStorage;
+ }
+
+ public void setSharedStorage(SharedStorage sharedStorage) {
+ this.sharedStorage = sharedStorage;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/ZookeeperRefDbConfig.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/ZookeeperRefDbConfig.java
new file mode 100644
index 0000000..e90faeb
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/api/model/shared/ZookeeperRefDbConfig.java
@@ -0,0 +1,36 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.api.model.shared;
+
+public class ZookeeperRefDbConfig {
+ private String connectString;
+ private String rootNode;
+
+ public String getConnectString() {
+ return connectString;
+ }
+
+ public void setConnectString(String connectString) {
+ this.connectString = connectString;
+ }
+
+ public String getRootNode() {
+ return rootNode;
+ }
+
+ public void setRootNode(String rootNode) {
+ this.rootNode = rootNode;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/GerritConfigBuilder.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/GerritConfigBuilder.java
new file mode 100644
index 0000000..221704d
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/GerritConfigBuilder.java
@@ -0,0 +1,172 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.gerrit.config;
+
+import static com.google.gerrit.k8s.operator.gerrit.dependent.GerritStatefulSet.HTTP_PORT;
+import static com.google.gerrit.k8s.operator.gerrit.dependent.GerritStatefulSet.SSH_PORT;
+
+import com.google.common.collect.ImmutableList;
+import com.google.gerrit.k8s.operator.gerrit.config.ConfigBuilder;
+import com.google.gerrit.k8s.operator.gerrit.config.RequiredOption;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.IngressConfig;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class GerritConfigBuilder extends ConfigBuilder {
+ private static final Pattern PROTOCOL_PATTERN = Pattern.compile("^(https?)://.+");
+
+ public GerritConfigBuilder(Gerrit gerrit) {
+ super(
+ gerrit.getSpec().getConfigFiles().getOrDefault("gerrit.config", ""),
+ ImmutableList.copyOf(collectRequiredOptions(gerrit)));
+ }
+
+ private static List<RequiredOption<?>> collectRequiredOptions(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ requiredOptions.addAll(cacheSection(gerrit));
+ requiredOptions.addAll(containerSection(gerrit));
+ requiredOptions.addAll(gerritSection(gerrit));
+ requiredOptions.addAll(httpdSection(gerrit));
+ requiredOptions.addAll(sshdSection(gerrit));
+ return requiredOptions;
+ }
+
+ private static List<RequiredOption<?>> cacheSection(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ requiredOptions.add(new RequiredOption<String>("cache", "directory", "cache"));
+ return requiredOptions;
+ }
+
+ private static List<RequiredOption<?>> containerSection(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ requiredOptions.add(new RequiredOption<String>("container", "user", "gerrit"));
+ requiredOptions.add(
+ new RequiredOption<Boolean>(
+ "container", "replica", gerrit.getSpec().getMode().equals(GerritMode.REPLICA)));
+ requiredOptions.add(
+ new RequiredOption<String>("container", "javaHome", "/usr/lib/jvm/java-11-openjdk"));
+ requiredOptions.add(javaOptions(gerrit));
+ return requiredOptions;
+ }
+
+ private static List<RequiredOption<?>> gerritSection(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ String serverId = gerrit.getSpec().getServerId();
+ requiredOptions.add(new RequiredOption<String>("gerrit", "basepath", "git"));
+ if (serverId != null && !serverId.isBlank()) {
+ requiredOptions.add(new RequiredOption<String>("gerrit", "serverId", serverId));
+ }
+
+ if (gerrit.getSpec().isHighlyAvailablePrimary()) {
+ requiredOptions.add(
+ new RequiredOption<Set<String>>(
+ "gerrit",
+ "installModule",
+ Set.of("com.gerritforge.gerrit.globalrefdb.validation.LibModule")));
+ requiredOptions.add(
+ new RequiredOption<Set<String>>(
+ "gerrit",
+ "installDbModule",
+ Set.of("com.ericsson.gerrit.plugins.highavailability.ValidationModule")));
+ }
+
+ IngressConfig ingressConfig = gerrit.getSpec().getIngress();
+ if (ingressConfig.isEnabled()) {
+ requiredOptions.add(
+ new RequiredOption<String>("gerrit", "canonicalWebUrl", ingressConfig.getUrl()));
+ }
+
+ return requiredOptions;
+ }
+
+ private static List<RequiredOption<?>> httpdSection(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ IngressConfig ingressConfig = gerrit.getSpec().getIngress();
+ if (ingressConfig.isEnabled()) {
+ requiredOptions.add(listenUrl(ingressConfig.getUrl()));
+ }
+ return requiredOptions;
+ }
+
+ private static List<RequiredOption<?>> sshdSection(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ requiredOptions.add(sshListenAddress(gerrit));
+ IngressConfig ingressConfig = gerrit.getSpec().getIngress();
+ if (ingressConfig.isEnabled() && gerrit.isSshEnabled()) {
+ requiredOptions.add(sshAdvertisedAddress(gerrit));
+ }
+ return requiredOptions;
+ }
+
+ private static RequiredOption<Set<String>> javaOptions(Gerrit gerrit) {
+ Set<String> javaOptions = new HashSet<>();
+ javaOptions.add("-Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore");
+ if (gerrit.getSpec().isHighlyAvailablePrimary()) {
+ javaOptions.add("-Djava.net.preferIPv4Stack=true");
+ }
+ if (gerrit.getSpec().getDebug().isEnabled()) {
+ javaOptions.add("-Xdebug");
+      String debugServerCfg = "-Xrunjdwp:transport=dt_socket,server=y,address=8000";
+ if (gerrit.getSpec().getDebug().isSuspend()) {
+ debugServerCfg = debugServerCfg + ",suspend=y";
+ } else {
+ debugServerCfg = debugServerCfg + ",suspend=n";
+ }
+ javaOptions.add(debugServerCfg);
+ }
+ return new RequiredOption<Set<String>>("container", "javaOptions", javaOptions);
+ }
+
+ private static RequiredOption<String> listenUrl(String url) {
+ StringBuilder listenUrlBuilder = new StringBuilder();
+ listenUrlBuilder.append("proxy-");
+ Matcher protocolMatcher = PROTOCOL_PATTERN.matcher(url);
+ if (protocolMatcher.matches()) {
+ listenUrlBuilder.append(protocolMatcher.group(1));
+ } else {
+ throw new IllegalStateException(
+ String.format("Unknown protocol used for canonicalWebUrl: %s", url));
+ }
+ listenUrlBuilder.append("://*:");
+ listenUrlBuilder.append(HTTP_PORT);
+ listenUrlBuilder.append("/");
+ return new RequiredOption<String>("httpd", "listenUrl", listenUrlBuilder.toString());
+ }
+
+ private static RequiredOption<String> sshListenAddress(Gerrit gerrit) {
+ String listenAddress;
+ if (gerrit.isSshEnabled()) {
+ listenAddress = "*:" + SSH_PORT;
+ } else {
+ listenAddress = "off";
+ }
+ return new RequiredOption<String>("sshd", "listenAddress", listenAddress);
+ }
+
+ private static RequiredOption<String> sshAdvertisedAddress(Gerrit gerrit) {
+ return new RequiredOption<String>(
+ "sshd",
+ "advertisedAddress",
+ gerrit.getSpec().getIngress().getFullHostnameForService(GerritService.getName(gerrit))
+ + ":29418");
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/HighAvailabilityPluginConfigBuilder.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/HighAvailabilityPluginConfigBuilder.java
new file mode 100644
index 0000000..b96a93b
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/HighAvailabilityPluginConfigBuilder.java
@@ -0,0 +1,65 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.gerrit.config;
+
+import com.google.common.collect.ImmutableList;
+import com.google.gerrit.k8s.operator.gerrit.config.ConfigBuilder;
+import com.google.gerrit.k8s.operator.gerrit.config.RequiredOption;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritStatefulSet;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class HighAvailabilityPluginConfigBuilder extends ConfigBuilder {
+ public HighAvailabilityPluginConfigBuilder(Gerrit gerrit) {
+ super(
+ gerrit.getSpec().getConfigFiles().getOrDefault("high-availability.config", ""),
+ ImmutableList.copyOf(collectRequiredOptions(gerrit)));
+ }
+
+ private static List<RequiredOption<?>> collectRequiredOptions(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ requiredOptions.add(new RequiredOption<String>("main", "sharedDirectory", "shared"));
+ requiredOptions.add(new RequiredOption<String>("peerInfo", "strategy", "jgroups"));
+ requiredOptions.add(new RequiredOption<String>("peerInfo", "jgroups", "myUrl", null));
+ requiredOptions.add(
+ new RequiredOption<String>("jgroups", "clusterName", gerrit.getMetadata().getName()));
+ requiredOptions.add(new RequiredOption<Boolean>("jgroups", "kubernetes", true));
+ requiredOptions.add(
+ new RequiredOption<String>(
+ "jgroups", "kubernetes", "namespace", gerrit.getMetadata().getNamespace()));
+ requiredOptions.add(
+ new RequiredOption<Set<String>>("jgroups", "kubernetes", "label", getLabels(gerrit)));
+ requiredOptions.add(new RequiredOption<Boolean>("cache", "synchronize", true));
+ requiredOptions.add(new RequiredOption<Boolean>("event", "synchronize", true));
+ requiredOptions.add(new RequiredOption<Boolean>("index", "synchronize", true));
+ requiredOptions.add(new RequiredOption<Boolean>("index", "synchronizeForced", true));
+ requiredOptions.add(new RequiredOption<Boolean>("healthcheck", "enable", true));
+ requiredOptions.add(new RequiredOption<Boolean>("ref-database", "enabled", true));
+ return requiredOptions;
+ }
+
+ private static Set<String> getLabels(Gerrit gerrit) {
+ Map<String, String> selectorLabels = GerritStatefulSet.getSelectorLabels(gerrit);
+ Set<String> labels = new HashSet<>();
+ for (Map.Entry<String, String> label : selectorLabels.entrySet()) {
+ labels.add(label.getKey() + "=" + label.getValue());
+ }
+ return labels;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/SpannerRefDbPluginConfigBuilder.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/SpannerRefDbPluginConfigBuilder.java
new file mode 100644
index 0000000..0f3cb30
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/SpannerRefDbPluginConfigBuilder.java
@@ -0,0 +1,58 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.gerrit.config;
+
+import com.google.common.collect.ImmutableList;
+import com.google.gerrit.k8s.operator.gerrit.config.ConfigBuilder;
+import com.google.gerrit.k8s.operator.gerrit.config.RequiredOption;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import java.util.ArrayList;
+import java.util.List;
+
+public class SpannerRefDbPluginConfigBuilder extends ConfigBuilder {
+ public SpannerRefDbPluginConfigBuilder(Gerrit gerrit) {
+ super(
+ gerrit.getSpec().getConfigFiles().getOrDefault("spanner-refdb.config", ""),
+ ImmutableList.copyOf(collectRequiredOptions(gerrit)));
+ }
+
+ private static List<RequiredOption<?>> collectRequiredOptions(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ requiredOptions.add(
+ new RequiredOption<String>("ref-database", "spanner", "useEmulator", "false"));
+ requiredOptions.add(
+ new RequiredOption<String>(
+ "ref-database",
+ "spanner",
+ "projectName",
+ gerrit.getSpec().getRefdb().getSpanner().getProjectName()));
+ requiredOptions.add(
+ new RequiredOption<String>(
+ "ref-database", "spanner", "credentialsPath", "/var/gerrit/etc/gcp-credentials.json"));
+ requiredOptions.add(
+ new RequiredOption<String>(
+ "ref-database",
+ "spanner",
+ "instance",
+ gerrit.getSpec().getRefdb().getSpanner().getInstance()));
+ requiredOptions.add(
+ new RequiredOption<String>(
+ "ref-database",
+ "spanner",
+ "database",
+ gerrit.getSpec().getRefdb().getSpanner().getDatabase()));
+ return requiredOptions;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/ZookeeperRefDbPluginConfigBuilder.java b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/ZookeeperRefDbPluginConfigBuilder.java
new file mode 100644
index 0000000..aabb726
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/java/com/google/gerrit/k8s/operator/v1alpha/gerrit/config/ZookeeperRefDbPluginConfigBuilder.java
@@ -0,0 +1,47 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.v1alpha.gerrit.config;
+
+import com.google.common.collect.ImmutableList;
+import com.google.gerrit.k8s.operator.gerrit.config.ConfigBuilder;
+import com.google.gerrit.k8s.operator.gerrit.config.RequiredOption;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import java.util.ArrayList;
+import java.util.List;
+
+public class ZookeeperRefDbPluginConfigBuilder extends ConfigBuilder {
+ public ZookeeperRefDbPluginConfigBuilder(Gerrit gerrit) {
+ super(
+ gerrit.getSpec().getConfigFiles().getOrDefault("zookeeper-refdb.config", ""),
+ ImmutableList.copyOf(collectRequiredOptions(gerrit)));
+ }
+
+ private static List<RequiredOption<?>> collectRequiredOptions(Gerrit gerrit) {
+ List<RequiredOption<?>> requiredOptions = new ArrayList<>();
+ requiredOptions.add(
+ new RequiredOption<String>(
+ "ref-database",
+ "zookeeper",
+ "connectString",
+ gerrit.getSpec().getRefdb().getZookeeper().getConnectString()));
+ requiredOptions.add(
+ new RequiredOption<String>(
+ "ref-database",
+ "zookeeper",
+ "rootNode",
+ gerrit.getSpec().getRefdb().getZookeeper().getRootNode()));
+ return requiredOptions;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/main/resources/META-INF/services/io.fabric8.kubernetes.api.model.KubernetesResource b/charts/k8s-gerrit/operator/src/main/resources/META-INF/services/io.fabric8.kubernetes.api.model.KubernetesResource
new file mode 100644
index 0000000..6fb15ac
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/resources/META-INF/services/io.fabric8.kubernetes.api.model.KubernetesResource
@@ -0,0 +1,5 @@
+com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster
+com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit
+com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollection
+com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver
+com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/main/resources/crd/emissary-crds.yaml b/charts/k8s-gerrit/operator/src/main/resources/crd/emissary-crds.yaml
new file mode 100644
index 0000000..bba936f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/resources/crd/emissary-crds.yaml
@@ -0,0 +1,2589 @@
+# This file is downloaded from the Emissary repository on GitHub:
+# https://github.com/emissary-ingress/emissary/blob/master/manifests/emissary/emissary-crds.yaml.in
+#
+# Several modifications have been manually made:
+# 1. Only the `Mapping`, `TLSContext`, and `Host` CRDs have been kept from the source file. The source
+# file defines many CRDs that are not required by this operator project so the unnecessary CRDs have
+# been deleted.
+# 2. `v2ExplicitTLS` field has been removed from the Mapping CRD `v3alpha1` version. This is because
+# the "crd-to-java" generator plugin we use has a bug (https://github.com/fabric8io/kubernetes-client/issues/5457)
+# while converting enum types and the bug is triggered by the `v2ExplicitTLS` field. This field
+# may be added back in once we upgrade our fabric8 version to 6.8.x, where this bug is resolved.
+# 3. `ambassador_id` property is added to `Mapping`, `TLSContext`, and `Host` CRD version `v2`, by
+# copying it over from `v3alpha1`.
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.12.0
+ labels:
+ app.kubernetes.io/instance: emissary-apiext
+ app.kubernetes.io/managed-by: kubectl_apply_-f_emissary-apiext.yaml
+ app.kubernetes.io/name: emissary-apiext
+ app.kubernetes.io/part-of: emissary-apiext
+ name: mappings.getambassador.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: emissary-apiext
+ namespace: emissary-system
+ conversionReviewVersions:
+ - v1
+ group: getambassador.io
+ names:
+ categories:
+ - ambassador-crds
+ kind: Mapping
+ listKind: MappingList
+ plural: mappings
+ singular: mapping
+ preserveUnknownFields: false
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.host
+ name: Source Host
+ type: string
+ - jsonPath: .spec.prefix
+ name: Source Prefix
+ type: string
+ - jsonPath: .spec.service
+ name: Dest Service
+ type: string
+ - jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .status.reason
+ name: Reason
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Mapping is the Schema for the mappings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: MappingSpec defines the desired state of Mapping
+ properties:
+ add_linkerd_headers:
+ type: boolean
+ add_request_headers:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ add_response_headers:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ allow_upgrade:
+ description: "A case-insensitive list of the non-HTTP protocols to
+ allow \"upgrading\" to from HTTP via the \"Connection: upgrade\"
+ mechanism[1]. After the upgrade, Ambassador does not interpret
+ the traffic, and behaves similarly to how it does for TCPMappings.
+ \n [1]: https://tools.ietf.org/html/rfc7230#section-6.7 \n For example,
+ if your upstream service supports WebSockets, you would write \n
+ allow_upgrade: - websocket \n Or if your upstream service supports
+ upgrading from HTTP to SPDY (as the Kubernetes apiserver does for
+ `kubectl exec` functionality), you would write \n allow_upgrade:
+ - spdy/3.1"
+ items:
+ type: string
+ type: array
+ auth_context_extensions:
+ additionalProperties:
+ type: string
+ type: object
+ auto_host_rewrite:
+ type: boolean
+ bypass_auth:
+ type: boolean
+ bypass_error_response_overrides:
+ description: If true, bypasses any `error_response_overrides` set
+ on the Ambassador module.
+ type: boolean
+ case_sensitive:
+ type: boolean
+ circuit_breakers:
+ items:
+ properties:
+ max_connections:
+ type: integer
+ max_pending_requests:
+ type: integer
+ max_requests:
+ type: integer
+ max_retries:
+ type: integer
+ priority:
+ enum:
+ - default
+ - high
+ type: string
+ type: object
+ type: array
+ cluster_idle_timeout_ms:
+ type: integer
+ cluster_max_connection_lifetime_ms:
+ type: integer
+ cluster_tag:
+ type: string
+ connect_timeout_ms:
+ type: integer
+ cors:
+ properties:
+ credentials:
+ type: boolean
+ max_age:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ dns_type:
+ type: string
+ docs:
+ description: DocsInfo provides some extra information about the docs
+ for the Mapping (used by the Dev Portal)
+ properties:
+ display_name:
+ type: string
+ ignored:
+ type: boolean
+ path:
+ type: string
+ timeout_ms:
+ type: integer
+ url:
+ type: string
+ type: object
+ enable_ipv4:
+ type: boolean
+ enable_ipv6:
+ type: boolean
+ envoy_override:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ error_response_overrides:
+ description: Error response overrides for this Mapping. Replaces all
+ of the `error_response_overrides` set on the Ambassador module,
+ if any.
+ items:
+ description: A response rewrite for an HTTP error response
+ properties:
+ body:
+ description: The new response body
+ properties:
+ content_type:
+ description: The content type to set on the error response
+ body when using text_format or text_format_source. Defaults
+ to 'text/plain'.
+ type: string
+ json_format:
+ additionalProperties:
+ type: string
+ description: 'A JSON response with content-type: application/json.
+ The values can contain format text like in text_format.'
+ type: object
+ text_format:
+ description: A format string representing a text response
+ body. Content-Type can be set using the `content_type`
+ field below.
+ type: string
+ text_format_source:
+ description: A format string sourced from a file on the
+ Ambassador container. Useful for larger response bodies
+ that should not be placed inline in configuration.
+ properties:
+ filename:
+ description: The name of a file on the Ambassador pod
+ that contains a format text string.
+ type: string
+ type: object
+ type: object
+ on_status_code:
+ description: The status code to match on -- not a pointer because
+ it's required.
+ maximum: 599
+ minimum: 400
+ type: integer
+ required:
+ - body
+ - on_status_code
+ type: object
+ minItems: 1
+ type: array
+ grpc:
+ type: boolean
+ headers:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ host:
+ type: string
+ host_redirect:
+ type: boolean
+ host_regex:
+ type: boolean
+ host_rewrite:
+ type: string
+ idle_timeout_ms:
+ type: integer
+ keepalive:
+ properties:
+ idle_time:
+ type: integer
+ interval:
+ type: integer
+ probes:
+ type: integer
+ type: object
+ labels:
+ additionalProperties:
+ description: A MappingLabelGroupsArray is an array of MappingLabelGroups.
+ I know, complex.
+ items:
+ description: 'A MappingLabelGroup is a single element of a MappingLabelGroupsArray:
+ a second map, where the key is a human-readable name that identifies
+ the group.'
+ maxProperties: 1
+ minProperties: 1
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: array
+ description: A DomainMap is the overall Mapping.spec.Labels type.
+ It maps domains (kind of like namespaces for Mapping labels) to
+ arrays of label groups.
+ type: object
+ load_balancer:
+ properties:
+ cookie:
+ properties:
+ name:
+ type: string
+ path:
+ type: string
+ ttl:
+ type: string
+ required:
+ - name
+ type: object
+ header:
+ type: string
+ policy:
+ enum:
+ - round_robin
+ - ring_hash
+ - maglev
+ - least_request
+ type: string
+ source_ip:
+ type: boolean
+ required:
+ - policy
+ type: object
+ method:
+ type: string
+ method_regex:
+ type: boolean
+ modules:
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: array
+ outlier_detection:
+ type: string
+ path_redirect:
+ description: Path replacement to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ type: string
+ precedence:
+ type: integer
+ prefix:
+ type: string
+ prefix_exact:
+ type: boolean
+ prefix_redirect:
+ description: Prefix rewrite to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ type: string
+ prefix_regex:
+ type: boolean
+ priority:
+ type: string
+ query_parameters:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ redirect_response_code:
+ description: The response code to use when generating an HTTP redirect.
+ Defaults to 301. Used with `host_redirect`.
+ enum:
+ - 301
+ - 302
+ - 303
+ - 307
+ - 308
+ type: integer
+ regex_headers:
+ additionalProperties:
+ type: string
+ type: object
+ regex_query_parameters:
+ additionalProperties:
+ type: string
+ type: object
+ regex_redirect:
+ description: Prefix regex rewrite to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ properties:
+ pattern:
+ type: string
+ substitution:
+ type: string
+ type: object
+ regex_rewrite:
+ properties:
+ pattern:
+ type: string
+ substitution:
+ type: string
+ type: object
+ resolver:
+ type: string
+ respect_dns_ttl:
+ type: boolean
+ retry_policy:
+ properties:
+ num_retries:
+ type: integer
+ per_try_timeout:
+ type: string
+ retry_on:
+ enum:
+ - 5xx
+ - gateway-error
+ - connect-failure
+ - retriable-4xx
+ - refused-stream
+ - retriable-status-codes
+ type: string
+ type: object
+ rewrite:
+ type: string
+ service:
+ type: string
+ shadow:
+ type: boolean
+ timeout_ms:
+ description: The timeout for requests that use this Mapping. Overrides
+ `cluster_request_timeout_ms` set on the Ambassador Module, if it
+ exists.
+ type: integer
+ use_websocket:
+ description: 'use_websocket is deprecated, and is equivlaent to setting
+ `allow_upgrade: ["websocket"]`'
+ type: boolean
+ v3StatsName:
+ type: string
+ v3health_checks:
+ items:
+ description: HealthCheck specifies settings for performing active
+ health checking on upstreams
+ properties:
+ health_check:
+ description: Configuration for where the healthcheck request
+ should be made to
+ maxProperties: 1
+ minProperties: 1
+ properties:
+ grpc:
+ description: HealthCheck for gRPC upstreams. Only one of
+ grpc_health_check or http_health_check may be specified
+ properties:
+ authority:
+ description: The value of the :authority header in the
+ gRPC health check request. If left empty the upstream
+ name will be used.
+ type: string
+ upstream_name:
+ description: The upstream name parameter which will
+ be sent to gRPC service in the health check message
+ type: string
+ required:
+ - upstream_name
+ type: object
+ http:
+ description: HealthCheck for HTTP upstreams. Only one of
+ http_health_check or grpc_health_check may be specified
+ properties:
+ add_request_headers:
+ additionalProperties:
+ properties:
+ append:
+ type: boolean
+ v2Representation:
+ enum:
+ - ""
+ - string
+ - "null"
+ type: string
+ value:
+ type: string
+ type: object
+ type: object
+ expected_statuses:
+ items:
+ description: A range of response statuses from Start
+ to End inclusive
+ properties:
+ max:
+ description: End of the statuses to include. Must
+ be between 100 and 599 (inclusive)
+ maximum: 599
+ minimum: 100
+ type: integer
+ min:
+ description: Start of the statuses to include.
+ Must be between 100 and 599 (inclusive)
+ maximum: 599
+ minimum: 100
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ type: array
+ hostname:
+ type: string
+ path:
+ type: string
+ remove_request_headers:
+ items:
+ type: string
+ type: array
+ required:
+ - path
+ type: object
+ type: object
+ healthy_threshold:
+ description: Number of expected responses for the upstream to
+ be considered healthy. Defaults to 1.
+ type: integer
+ interval:
+ description: Interval between health checks. Defaults to every
+ 5 seconds.
+ type: string
+ timeout:
+ description: Timeout for connecting to the health checking endpoint.
+ Defaults to 3 seconds.
+ type: string
+ unhealthy_threshold:
+ description: Number of non-expected responses for the upstream
+ to be considered unhealthy. A single 503 will mark the upstream
+ as unhealthy regardless of the threshold. Defaults to 2.
+ type: integer
+ required:
+ - health_check
+ type: object
+ minItems: 1
+ type: array
+ weight:
+ type: integer
+ required:
+ - prefix
+ - service
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ description: MappingStatus defines the observed state of Mapping
+ properties:
+ reason:
+ type: string
+ state:
+ enum:
+ - ""
+ - Inactive
+ - Running
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - jsonPath: .spec.host
+ name: Source Host
+ type: string
+ - jsonPath: .spec.prefix
+ name: Source Prefix
+ type: string
+ - jsonPath: .spec.service
+ name: Dest Service
+ type: string
+ - jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .status.reason
+ name: Reason
+ type: string
+ name: v2
+ schema:
+ openAPIV3Schema:
+ description: Mapping is the Schema for the mappings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: MappingSpec defines the desired state of Mapping
+ properties:
+ add_linkerd_headers:
+ type: boolean
+ add_request_headers:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ add_response_headers:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ allow_upgrade:
+ description: "A case-insensitive list of the non-HTTP protocols to
+ allow \"upgrading\" to from HTTP via the \"Connection: upgrade\"
+ mechanism[1]. After the upgrade, Ambassador does not interpret
+ the traffic, and behaves similarly to how it does for TCPMappings.
+ \n [1]: https://tools.ietf.org/html/rfc7230#section-6.7 \n For example,
+ if your upstream service supports WebSockets, you would write \n
+ allow_upgrade: - websocket \n Or if your upstream service supports
+ upgrading from HTTP to SPDY (as the Kubernetes apiserver does for
+ `kubectl exec` functionality), you would write \n allow_upgrade:
+ - spdy/3.1"
+ items:
+ type: string
+ type: array
+ # [operator] added manually by copying over from v3alpha1
+ ambassador_id:
+ description: "AmbassadorID declares which Ambassador instances should
+ pay attention to this resource. If no value is provided, the default
+ is: \n ambassador_id: - \"default\""
+ items:
+ type: string
+ type: array
+ auth_context_extensions:
+ additionalProperties:
+ type: string
+ type: object
+ auto_host_rewrite:
+ type: boolean
+ bypass_auth:
+ type: boolean
+ bypass_error_response_overrides:
+ description: If true, bypasses any `error_response_overrides` set
+ on the Ambassador module.
+ type: boolean
+ case_sensitive:
+ type: boolean
+ circuit_breakers:
+ items:
+ properties:
+ max_connections:
+ type: integer
+ max_pending_requests:
+ type: integer
+ max_requests:
+ type: integer
+ max_retries:
+ type: integer
+ priority:
+ enum:
+ - default
+ - high
+ type: string
+ type: object
+ type: array
+ cluster_idle_timeout_ms:
+ type: integer
+ cluster_max_connection_lifetime_ms:
+ type: integer
+ cluster_tag:
+ type: string
+ connect_timeout_ms:
+ type: integer
+ cors:
+ properties:
+ credentials:
+ type: boolean
+ max_age:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ dns_type:
+ type: string
+ docs:
+ description: DocsInfo provides some extra information about the docs
+ for the Mapping (used by the Dev Portal)
+ properties:
+ display_name:
+ type: string
+ ignored:
+ type: boolean
+ path:
+ type: string
+ timeout_ms:
+ type: integer
+ url:
+ type: string
+ type: object
+ enable_ipv4:
+ type: boolean
+ enable_ipv6:
+ type: boolean
+ envoy_override:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ error_response_overrides:
+ description: Error response overrides for this Mapping. Replaces all
+ of the `error_response_overrides` set on the Ambassador module,
+ if any.
+ items:
+ description: A response rewrite for an HTTP error response
+ properties:
+ body:
+ description: The new response body
+ properties:
+ content_type:
+ description: The content type to set on the error response
+ body when using text_format or text_format_source. Defaults
+ to 'text/plain'.
+ type: string
+ json_format:
+ additionalProperties:
+ type: string
+ description: 'A JSON response with content-type: application/json.
+ The values can contain format text like in text_format.'
+ type: object
+ text_format:
+ description: A format string representing a text response
+ body. Content-Type can be set using the `content_type`
+ field below.
+ type: string
+ text_format_source:
+ description: A format string sourced from a file on the
+ Ambassador container. Useful for larger response bodies
+ that should not be placed inline in configuration.
+ properties:
+ filename:
+ description: The name of a file on the Ambassador pod
+ that contains a format text string.
+ type: string
+ type: object
+ type: object
+ on_status_code:
+ description: The status code to match on -- not a pointer because
+ it's required.
+ maximum: 599
+ minimum: 400
+ type: integer
+ required:
+ - body
+ - on_status_code
+ type: object
+ minItems: 1
+ type: array
+ grpc:
+ type: boolean
+ headers:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ host:
+ type: string
+ host_redirect:
+ type: boolean
+ host_regex:
+ type: boolean
+ host_rewrite:
+ type: string
+ idle_timeout_ms:
+ type: integer
+ keepalive:
+ properties:
+ idle_time:
+ type: integer
+ interval:
+ type: integer
+ probes:
+ type: integer
+ type: object
+ labels:
+ additionalProperties:
+ description: A MappingLabelGroupsArray is an array of MappingLabelGroups.
+ I know, complex.
+ items:
+ description: 'A MappingLabelGroup is a single element of a MappingLabelGroupsArray:
+ a second map, where the key is a human-readable name that identifies
+ the group.'
+ maxProperties: 1
+ minProperties: 1
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: array
+ description: A DomainMap is the overall Mapping.spec.Labels type.
+ It maps domains (kind of like namespaces for Mapping labels) to
+ arrays of label groups.
+ type: object
+ load_balancer:
+ properties:
+ cookie:
+ properties:
+ name:
+ type: string
+ path:
+ type: string
+ ttl:
+ type: string
+ required:
+ - name
+ type: object
+ header:
+ type: string
+ policy:
+ enum:
+ - round_robin
+ - ring_hash
+ - maglev
+ - least_request
+ type: string
+ source_ip:
+ type: boolean
+ required:
+ - policy
+ type: object
+ method:
+ type: string
+ method_regex:
+ type: boolean
+ modules:
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: array
+ outlier_detection:
+ type: string
+ path_redirect:
+ description: Path replacement to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ type: string
+ precedence:
+ type: integer
+ prefix:
+ type: string
+ prefix_exact:
+ type: boolean
+ prefix_redirect:
+ description: Prefix rewrite to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ type: string
+ prefix_regex:
+ type: boolean
+ priority:
+ type: string
+ query_parameters:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ redirect_response_code:
+ description: The response code to use when generating an HTTP redirect.
+ Defaults to 301. Used with `host_redirect`.
+ enum:
+ - 301
+ - 302
+ - 303
+ - 307
+ - 308
+ type: integer
+ regex_headers:
+ additionalProperties:
+ type: string
+ type: object
+ regex_query_parameters:
+ additionalProperties:
+ type: string
+ type: object
+ regex_redirect:
+ description: Prefix regex rewrite to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ properties:
+ pattern:
+ type: string
+ substitution:
+ type: string
+ type: object
+ regex_rewrite:
+ properties:
+ pattern:
+ type: string
+ substitution:
+ type: string
+ type: object
+ resolver:
+ type: string
+ respect_dns_ttl:
+ type: boolean
+ retry_policy:
+ properties:
+ num_retries:
+ type: integer
+ per_try_timeout:
+ type: string
+ retry_on:
+ enum:
+ - 5xx
+ - gateway-error
+ - connect-failure
+ - retriable-4xx
+ - refused-stream
+ - retriable-status-codes
+ type: string
+ type: object
+ rewrite:
+ type: string
+ service:
+ type: string
+ shadow:
+ type: boolean
+ timeout_ms:
+ description: The timeout for requests that use this Mapping. Overrides
+ `cluster_request_timeout_ms` set on the Ambassador Module, if it
+ exists.
+ type: integer
+ use_websocket:
+ description: 'use_websocket is deprecated, and is equivlaent to setting
+ `allow_upgrade: ["websocket"]`'
+ type: boolean
+ v3StatsName:
+ type: string
+ v3health_checks:
+ items:
+ description: HealthCheck specifies settings for performing active
+ health checking on upstreams
+ properties:
+ health_check:
+ description: Configuration for where the healthcheck request
+ should be made to
+ maxProperties: 1
+ minProperties: 1
+ properties:
+ grpc:
+ description: HealthCheck for gRPC upstreams. Only one of
+ grpc_health_check or http_health_check may be specified
+ properties:
+ authority:
+ description: The value of the :authority header in the
+ gRPC health check request. If left empty the upstream
+ name will be used.
+ type: string
+ upstream_name:
+ description: The upstream name parameter which will
+ be sent to gRPC service in the health check message
+ type: string
+ required:
+ - upstream_name
+ type: object
+ http:
+ description: HealthCheck for HTTP upstreams. Only one of
+ http_health_check or grpc_health_check may be specified
+ properties:
+ add_request_headers:
+ additionalProperties:
+ properties:
+ append:
+ type: boolean
+ v2Representation:
+ enum:
+ - ""
+ - string
+ - "null"
+ type: string
+ value:
+ type: string
+ type: object
+ type: object
+ expected_statuses:
+ items:
+ description: A range of response statuses from Start
+ to End inclusive
+ properties:
+ max:
+ description: End of the statuses to include. Must
+ be between 100 and 599 (inclusive)
+ maximum: 599
+ minimum: 100
+ type: integer
+ min:
+ description: Start of the statuses to include.
+ Must be between 100 and 599 (inclusive)
+ maximum: 599
+ minimum: 100
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ type: array
+ hostname:
+ type: string
+ path:
+ type: string
+ remove_request_headers:
+ items:
+ type: string
+ type: array
+ required:
+ - path
+ type: object
+ type: object
+ healthy_threshold:
+ description: Number of expected responses for the upstream to
+ be considered healthy. Defaults to 1.
+ type: integer
+ interval:
+ description: Interval between health checks. Defaults to every
+ 5 seconds.
+ type: string
+ timeout:
+ description: Timeout for connecting to the health checking endpoint.
+ Defaults to 3 seconds.
+ type: string
+ unhealthy_threshold:
+ description: Number of non-expected responses for the upstream
+ to be considered unhealthy. A single 503 will mark the upstream
+ as unhealthy regardless of the threshold. Defaults to 2.
+ type: integer
+ required:
+ - health_check
+ type: object
+ minItems: 1
+ type: array
+ weight:
+ type: integer
+ required:
+ - prefix
+ - service
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ description: MappingStatus defines the observed state of Mapping
+ properties:
+ reason:
+ type: string
+ state:
+ enum:
+ - ""
+ - Inactive
+ - Running
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - jsonPath: .spec.host
+ name: Source Host
+ type: string
+ - jsonPath: .spec.prefix
+ name: Source Prefix
+ type: string
+ - jsonPath: .spec.service
+ name: Dest Service
+ type: string
+ - jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .status.reason
+ name: Reason
+ type: string
+ name: v3alpha1
+ schema:
+ openAPIV3Schema:
+ description: Mapping is the Schema for the mappings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: MappingSpec defines the desired state of Mapping
+ properties:
+ add_linkerd_headers:
+ type: boolean
+ add_request_headers:
+ additionalProperties:
+ properties:
+ append:
+ type: boolean
+ v2Representation:
+ enum:
+ - ""
+ - string
+ - "null"
+ type: string
+ value:
+ type: string
+ type: object
+ type: object
+ add_response_headers:
+ additionalProperties:
+ properties:
+ append:
+ type: boolean
+ v2Representation:
+ enum:
+ - ""
+ - string
+ - "null"
+ type: string
+ value:
+ type: string
+ type: object
+ type: object
+ allow_upgrade:
+ description: "A case-insensitive list of the non-HTTP protocols to
+ allow \"upgrading\" to from HTTP via the \"Connection: upgrade\"
+ mechanism[1]. After the upgrade, Ambassador does not interpret
+ the traffic, and behaves similarly to how it does for TCPMappings.
+ \n [1]: https://tools.ietf.org/html/rfc7230#section-6.7 \n For example,
+ if your upstream service supports WebSockets, you would write \n
+ allow_upgrade: - websocket \n Or if your upstream service supports
+ upgrading from HTTP to SPDY (as the Kubernetes apiserver does for
+ `kubectl exec` functionality), you would write \n allow_upgrade:
+ - spdy/3.1"
+ items:
+ type: string
+ type: array
+ ambassador_id:
+ description: "AmbassadorID declares which Ambassador instances should
+ pay attention to this resource. If no value is provided, the default
+ is: \n ambassador_id: - \"default\""
+ items:
+ type: string
+ type: array
+ auth_context_extensions:
+ additionalProperties:
+ type: string
+ type: object
+ auto_host_rewrite:
+ type: boolean
+ bypass_auth:
+ type: boolean
+ bypass_error_response_overrides:
+ description: If true, bypasses any `error_response_overrides` set
+ on the Ambassador module.
+ type: boolean
+ case_sensitive:
+ type: boolean
+ circuit_breakers:
+ items:
+ properties:
+ max_connections:
+ type: integer
+ max_pending_requests:
+ type: integer
+ max_requests:
+ type: integer
+ max_retries:
+ type: integer
+ priority:
+ enum:
+ - default
+ - high
+ type: string
+ type: object
+ type: array
+ cluster_idle_timeout_ms:
+ type: integer
+ cluster_max_connection_lifetime_ms:
+ type: integer
+ cluster_tag:
+ type: string
+ connect_timeout_ms:
+ type: integer
+ cors:
+ properties:
+ credentials:
+ type: boolean
+ exposed_headers:
+ items:
+ type: string
+ type: array
+ headers:
+ items:
+ type: string
+ type: array
+ max_age:
+ type: string
+ methods:
+ items:
+ type: string
+ type: array
+ origins:
+ items:
+ type: string
+ type: array
+ v2CommaSeparatedOrigins:
+ type: boolean
+ type: object
+ dns_type:
+ type: string
+ docs:
+ description: DocsInfo provides some extra information about the docs
+ for the Mapping. Docs is used by both the agent and the DevPortal.
+ properties:
+ display_name:
+ type: string
+ ignored:
+ type: boolean
+ path:
+ type: string
+ timeout_ms:
+ type: integer
+ url:
+ type: string
+ type: object
+ enable_ipv4:
+ type: boolean
+ enable_ipv6:
+ type: boolean
+ envoy_override:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ error_response_overrides:
+ description: Error response overrides for this Mapping. Replaces all
+ of the `error_response_overrides` set on the Ambassador module,
+ if any.
+ items:
+ description: A response rewrite for an HTTP error response
+ properties:
+ body:
+ description: The new response body
+ properties:
+ content_type:
+ description: The content type to set on the error response
+ body when using text_format or text_format_source. Defaults
+ to 'text/plain'.
+ type: string
+ json_format:
+ additionalProperties:
+ type: string
+ description: 'A JSON response with content-type: application/json.
+ The values can contain format text like in text_format.'
+ type: object
+ text_format:
+ description: A format string representing a text response
+ body. Content-Type can be set using the `content_type`
+ field below.
+ type: string
+ text_format_source:
+ description: A format string sourced from a file on the
+ Ambassador container. Useful for larger response bodies
+ that should not be placed inline in configuration.
+ properties:
+ filename:
+ description: The name of a file on the Ambassador pod
+ that contains a format text string.
+ type: string
+ type: object
+ type: object
+ on_status_code:
+ description: The status code to match on -- not a pointer because
+ it's required.
+ maximum: 599
+ minimum: 400
+ type: integer
+ required:
+ - body
+ - on_status_code
+ type: object
+ minItems: 1
+ type: array
+ grpc:
+ type: boolean
+ headers:
+ additionalProperties:
+ type: string
+ type: object
+ health_checks:
+ items:
+ description: HealthCheck specifies settings for performing active
+ health checking on upstreams
+ properties:
+ health_check:
+ description: Configuration for where the healthcheck request
+ should be made to
+ maxProperties: 1
+ minProperties: 1
+ properties:
+ grpc:
+ description: HealthCheck for gRPC upstreams. Only one of
+ grpc_health_check or http_health_check may be specified
+ properties:
+ authority:
+ description: The value of the :authority header in the
+ gRPC health check request. If left empty the upstream
+ name will be used.
+ type: string
+ upstream_name:
+ description: The upstream name parameter which will
+ be sent to gRPC service in the health check message
+ type: string
+ required:
+ - upstream_name
+ type: object
+ http:
+ description: HealthCheck for HTTP upstreams. Only one of
+ http_health_check or grpc_health_check may be specified
+ properties:
+ add_request_headers:
+ additionalProperties:
+ properties:
+ append:
+ type: boolean
+ v2Representation:
+ enum:
+ - ""
+ - string
+ - "null"
+ type: string
+ value:
+ type: string
+ type: object
+ type: object
+ expected_statuses:
+ items:
+ description: A range of response statuses from Start
+ to End inclusive
+ properties:
+ max:
+ description: End of the statuses to include. Must
+ be between 100 and 599 (inclusive)
+ maximum: 599
+ minimum: 100
+ type: integer
+ min:
+ description: Start of the statuses to include.
+ Must be between 100 and 599 (inclusive)
+ maximum: 599
+ minimum: 100
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ type: array
+ hostname:
+ type: string
+ path:
+ type: string
+ remove_request_headers:
+ items:
+ type: string
+ type: array
+ required:
+ - path
+ type: object
+ type: object
+ healthy_threshold:
+ description: Number of expected responses for the upstream to
+ be considered healthy. Defaults to 1.
+ type: integer
+ interval:
+ description: Interval between health checks. Defaults to every
+ 5 seconds.
+ type: string
+ timeout:
+ description: Timeout for connecting to the health checking endpoint.
+ Defaults to 3 seconds.
+ type: string
+ unhealthy_threshold:
+ description: Number of non-expected responses for the upstream
+ to be considered unhealthy. A single 503 will mark the upstream
+ as unhealthy regardless of the threshold. Defaults to 2.
+ type: integer
+ required:
+ - health_check
+ type: object
+ minItems: 1
+ type: array
+ host:
+ description: "Exact match for the hostname of a request if HostRegex
+ is false; regex match for the hostname if HostRegex is true. \n
+ Host specifies both a match for the ':authority' header of a request,
+ as well as a match criterion for Host CRDs: a Mapping that specifies
+ Host will not associate with a Host that doesn't have a matching
+ Hostname. \n If both Host and Hostname are set, an error is logged,
+ Host is ignored, and Hostname is used. \n DEPRECATED: Host is either
+ an exact match or a regex, depending on HostRegex. Use HostName
+ instead."
+ type: string
+ host_redirect:
+ type: boolean
+ host_regex:
+ description: 'DEPRECATED: Host is either an exact match or a regex,
+ depending on HostRegex. Use HostName instead.'
+ type: boolean
+ host_rewrite:
+ type: string
+ hostname:
+ description: "Hostname is a DNS glob specifying the hosts to which
+ this Mapping applies. \n Hostname specifies both a match for the
+ ':authority' header of a request, as well as a match criterion for
+ Host CRDs: a Mapping that specifies Hostname will not associate
+ with a Host that doesn't have a matching Hostname. \n If both Host
+ and Hostname are set, an error is logged, Host is ignored, and Hostname
+ is used."
+ type: string
+ idle_timeout_ms:
+ type: integer
+ keepalive:
+ properties:
+ idle_time:
+ type: integer
+ interval:
+ type: integer
+ probes:
+ type: integer
+ type: object
+ labels:
+ additionalProperties:
+ description: A MappingLabelGroupsArray is an array of MappingLabelGroups.
+ I know, complex.
+ items:
+ additionalProperties:
+ description: 'A MappingLabelsArray is the value in the MappingLabelGroup:
+ an array of label specifiers.'
+ items:
+ description: "A MappingLabelSpecifier (finally!) defines a
+ single label. \n This mimics envoy/config/route/v3/route_components.proto:RateLimit:Action:action_specifier."
+ maxProperties: 1
+ minProperties: 1
+ properties:
+ destination_cluster:
+ description: Sets the label "destination_cluster=«Envoy
+ destination cluster name»".
+ properties:
+ key:
+ enum:
+ - destination_cluster
+ type: string
+ required:
+ - key
+ type: object
+ generic_key:
+ description: Sets the label "«key»=«value»" (where by
+ default «key» is "generic_key").
+ properties:
+ key:
+ description: The default is "generic_key".
+ type: string
+ v2Shorthand:
+ type: boolean
+ value:
+ type: string
+ required:
+ - value
+ type: object
+ remote_address:
+ description: Sets the label "remote_address=«IP address
+ of the client»".
+ properties:
+ key:
+ enum:
+ - remote_address
+ type: string
+ required:
+ - key
+ type: object
+ request_headers:
+ description: If the «header_name» header is set, then
+ set the label "«key»=«Value of the «header_name» header»";
+ otherwise skip applying this label group.
+ properties:
+ header_name:
+ type: string
+ key:
+ type: string
+ omit_if_not_present:
+ type: boolean
+ required:
+ - header_name
+ - key
+ type: object
+ source_cluster:
+ description: Sets the label "source_cluster=«Envoy source
+ cluster name»".
+ properties:
+ key:
+ enum:
+ - source_cluster
+ type: string
+ required:
+ - key
+ type: object
+ type: object
+ type: array
+ description: 'A MappingLabelGroup is a single element of a MappingLabelGroupsArray:
+ a second map, where the key is a human-readable name that identifies
+ the group.'
+ maxProperties: 1
+ minProperties: 1
+ type: object
+ type: array
+ description: A DomainMap is the overall Mapping.spec.Labels type.
+ It maps domains (kind of like namespaces for Mapping labels) to
+ arrays of label groups.
+ type: object
+ load_balancer:
+ properties:
+ cookie:
+ properties:
+ name:
+ type: string
+ path:
+ type: string
+ ttl:
+ type: string
+ required:
+ - name
+ type: object
+ header:
+ type: string
+ policy:
+ enum:
+ - round_robin
+ - ring_hash
+ - maglev
+ - least_request
+ type: string
+ source_ip:
+ type: boolean
+ required:
+ - policy
+ type: object
+ method:
+ type: string
+ method_regex:
+ type: boolean
+ modules:
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: array
+ outlier_detection:
+ type: string
+ path_redirect:
+ description: Path replacement to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ type: string
+ precedence:
+ type: integer
+ prefix:
+ type: string
+ prefix_exact:
+ type: boolean
+ prefix_redirect:
+ description: Prefix rewrite to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ type: string
+ prefix_regex:
+ type: boolean
+ priority:
+ type: string
+ query_parameters:
+ additionalProperties:
+ type: string
+ type: object
+ redirect_response_code:
+ description: The response code to use when generating an HTTP redirect.
+ Defaults to 301. Used with `host_redirect`.
+ enum:
+ - 301
+ - 302
+ - 303
+ - 307
+ - 308
+ type: integer
+ regex_headers:
+ additionalProperties:
+ type: string
+ type: object
+ regex_query_parameters:
+ additionalProperties:
+ type: string
+ type: object
+ regex_redirect:
+ description: Prefix regex rewrite to use when generating an HTTP redirect.
+ Used with `host_redirect`.
+ properties:
+ pattern:
+ type: string
+ substitution:
+ type: string
+ type: object
+ regex_rewrite:
+ properties:
+ pattern:
+ type: string
+ substitution:
+ type: string
+ type: object
+ remove_request_headers:
+ items:
+ type: string
+ type: array
+ remove_response_headers:
+ items:
+ type: string
+ type: array
+ resolver:
+ type: string
+ respect_dns_ttl:
+ type: boolean
+ retry_policy:
+ properties:
+ num_retries:
+ type: integer
+ per_try_timeout:
+ type: string
+ retry_on:
+ enum:
+ - 5xx
+ - gateway-error
+ - connect-failure
+ - retriable-4xx
+ - refused-stream
+ - retriable-status-codes
+ type: string
+ type: object
+ rewrite:
+ type: string
+ service:
+ type: string
+ shadow:
+ type: boolean
+ stats_name:
+ type: string
+ timeout_ms:
+ description: The timeout for requests that use this Mapping. Overrides
+ `cluster_request_timeout_ms` set on the Ambassador Module, if it
+ exists.
+ type: integer
+ tls:
+ type: string
+ use_websocket:
+                description: 'use_websocket is deprecated, and is equivalent to setting
+ `allow_upgrade: ["websocket"]`'
+ type: boolean
+ v2BoolHeaders:
+ items:
+ type: string
+ type: array
+ v2BoolQueryParameters:
+ items:
+ type: string
+ type: array
+ # TODO: uncomment when [bug](https://github.com/fabric8io/kubernetes-client/issues/5457) is resolved
+ # v2ExplicitTLS:
+ # description: V2ExplicitTLS controls some vanity/stylistic elements
+ # when converting from v3alpha1 to v2. The values in an V2ExplicitTLS
+ # should not in any way affect the runtime operation of Emissary;
+ # except that it may affect internal names in the Envoy config, which
+ # may in turn affect stats names. But it should not affect any end-user
+ # observable behavior.
+ # properties:
+ # serviceScheme:
+ # description: "ServiceScheme specifies how to spell and capitalize
+ # the scheme-part of the service URL. \n Acceptable values are
+ # \"http://\" (case-insensitive), \"https://\" (case-insensitive),
+ # or \"\". The value is used if it agrees with whether or not
+ # this resource enables TLS origination, or if something else
+ # in the resource overrides the scheme."
+ # pattern: ^([hH][tT][tT][pP][sS]?://)?$
+ # type: string
+ # tls:
+ # description: "TLS controls whether and how to represent the \"tls\"
+ # field when its value could be implied by the \"service\" field.
+ # \ In v2, there were a lot of different ways to spell an \"empty\"
+ # value, and this field specifies which way to spell it (and will
+ # therefore only be used if the value will indeed be empty). \n
+ # | Value | Representation | Meaning
+ # of representation | |--------------+---------------------------------------+------------------------------------|
+ # | \"\" | omit the field | defer
+ # to service (no TLSContext) | | \"null\" | store an explicit
+ # \"null\" in the field | defer to service (no TLSContext) |
+ # | \"string\" | store an empty string in the field | defer
+ # to service (no TLSContext) | | \"bool:false\" | store a Boolean
+ # \"false\" in the field | defer to service (no TLSContext) |
+ # | \"bool:true\" | store a Boolean \"true\" in the field |
+ # originate TLS (no TLSContext) | \n If the meaning of the
+ # representation contradicts anything else (if a TLSContext is
+ # to be used, or in the case of \"bool:true\" if TLS is not to
+ # be originated), then this field is ignored."
+ # enum:
+ # - ""
+ # - "null"
+ # - bool:true
+ # - bool:false
+ # - string
+ # type: string
+ # type: object
+ weight:
+ type: integer
+ required:
+ - prefix
+ - service
+ type: object
+ status:
+ description: MappingStatus defines the observed state of Mapping
+ properties:
+ reason:
+ type: string
+ state:
+ enum:
+ - ""
+ - Inactive
+ - Running
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.12.0
+ labels:
+ app.kubernetes.io/instance: emissary-apiext
+ app.kubernetes.io/managed-by: kubectl_apply_-f_emissary-apiext.yaml
+ app.kubernetes.io/name: emissary-apiext
+ app.kubernetes.io/part-of: emissary-apiext
+ name: tlscontexts.getambassador.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: emissary-apiext
+ namespace: emissary-system
+ conversionReviewVersions:
+ - v1
+ group: getambassador.io
+ names:
+ categories:
+ - ambassador-crds
+ kind: TLSContext
+ listKind: TLSContextList
+ plural: tlscontexts
+ singular: tlscontext
+ preserveUnknownFields: false
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: TLSContext is the Schema for the tlscontexts API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: TLSContextSpec defines the desired state of TLSContext
+ properties:
+ alpn_protocols:
+ type: string
+ ca_secret:
+ type: string
+ cacert_chain_file:
+ type: string
+ cert_chain_file:
+ type: string
+ cert_required:
+ type: boolean
+ cipher_suites:
+ items:
+ type: string
+ type: array
+ ecdh_curves:
+ items:
+ type: string
+ type: array
+ hosts:
+ items:
+ type: string
+ type: array
+ max_tls_version:
+ enum:
+ - v1.0
+ - v1.1
+ - v1.2
+ - v1.3
+ type: string
+ min_tls_version:
+ enum:
+ - v1.0
+ - v1.1
+ - v1.2
+ - v1.3
+ type: string
+ private_key_file:
+ type: string
+ redirect_cleartext_from:
+ type: integer
+ secret:
+ type: string
+ secret_namespacing:
+ type: boolean
+ sni:
+ type: string
+ v3CRLSecret:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ - name: v2
+ schema:
+ openAPIV3Schema:
+ description: TLSContext is the Schema for the tlscontexts API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: TLSContextSpec defines the desired state of TLSContext
+ properties:
+ alpn_protocols:
+ type: string
+              # [operator] added manually by copying over from v3alpha1
+ ambassador_id:
+ description: "AmbassadorID declares which Ambassador instances should
+ pay attention to this resource. If no value is provided, the default
+ is: \n ambassador_id: - \"default\""
+ items:
+ type: string
+ type: array
+ ca_secret:
+ type: string
+ cacert_chain_file:
+ type: string
+ cert_chain_file:
+ type: string
+ cert_required:
+ type: boolean
+ cipher_suites:
+ items:
+ type: string
+ type: array
+ ecdh_curves:
+ items:
+ type: string
+ type: array
+ hosts:
+ items:
+ type: string
+ type: array
+ max_tls_version:
+ enum:
+ - v1.0
+ - v1.1
+ - v1.2
+ - v1.3
+ type: string
+ min_tls_version:
+ enum:
+ - v1.0
+ - v1.1
+ - v1.2
+ - v1.3
+ type: string
+ private_key_file:
+ type: string
+ redirect_cleartext_from:
+ type: integer
+ secret:
+ type: string
+ secret_namespacing:
+ type: boolean
+ sni:
+ type: string
+ v3CRLSecret:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: true
+ - name: v3alpha1
+ schema:
+ openAPIV3Schema:
+ description: TLSContext is the Schema for the tlscontexts API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: TLSContextSpec defines the desired state of TLSContext
+ properties:
+ alpn_protocols:
+ type: string
+ ambassador_id:
+ description: "AmbassadorID declares which Ambassador instances should
+ pay attention to this resource. If no value is provided, the default
+ is: \n ambassador_id: - \"default\""
+ items:
+ type: string
+ type: array
+ ca_secret:
+ type: string
+ cacert_chain_file:
+ type: string
+ cert_chain_file:
+ type: string
+ cert_required:
+ type: boolean
+ cipher_suites:
+ items:
+ type: string
+ type: array
+ crl_secret:
+ type: string
+ ecdh_curves:
+ items:
+ type: string
+ type: array
+ hosts:
+ items:
+ type: string
+ type: array
+ max_tls_version:
+ enum:
+ - v1.0
+ - v1.1
+ - v1.2
+ - v1.3
+ type: string
+ min_tls_version:
+ enum:
+ - v1.0
+ - v1.1
+ - v1.2
+ - v1.3
+ type: string
+ private_key_file:
+ type: string
+ redirect_cleartext_from:
+ type: integer
+ secret:
+ type: string
+ secret_namespacing:
+ type: boolean
+ sni:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.12.0
+ labels:
+ app.kubernetes.io/instance: emissary-apiext
+ app.kubernetes.io/managed-by: kubectl_apply_-f_emissary-apiext.yaml
+ app.kubernetes.io/name: emissary-apiext
+ app.kubernetes.io/part-of: emissary-apiext
+ name: hosts.getambassador.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: emissary-apiext
+ namespace: emissary-system
+ conversionReviewVersions:
+ - v1
+ group: getambassador.io
+ names:
+ categories:
+ - ambassador-crds
+ kind: Host
+ listKind: HostList
+ plural: hosts
+ singular: host
+ preserveUnknownFields: false
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.hostname
+ name: Hostname
+ type: string
+ - jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .status.phaseCompleted
+ name: Phase Completed
+ type: string
+ - jsonPath: .status.phasePending
+ name: Phase Pending
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v2
+ schema:
+ openAPIV3Schema:
+ description: Host is the Schema for the hosts API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HostSpec defines the desired state of Host
+ properties:
+ acmeProvider:
+ description: Specifies whether/who to talk ACME with to automatically
+ manage the $tlsSecret.
+ properties:
+ authority:
+ description: Specifies who to talk ACME with to get certs. Defaults
+ to Let's Encrypt; if "none" (case-insensitive), do not try to
+ do ACME for this Host.
+ type: string
+ email:
+ type: string
+ privateKeySecret:
+ description: "Specifies the Kubernetes Secret to use to store
+ the private key of the ACME account (essentially, where to store
+ the auto-generated password for the auto-created ACME account).
+ \ You should not normally need to set this--the default value
+ is based on a combination of the ACME authority being registered
+                      with and the email address associated with the account. \n Note
+ that this is a native-Kubernetes-style core.v1.LocalObjectReference,
+ not an Ambassador-style `{name}.{namespace}` string. Because
+ we're opinionated, it does not support referencing a Secret
+ in another namespace (because most native Kubernetes resources
+ don't support that), but if we ever abandon that opinion and
+                      decide to support non-local references, it would be by adding
+ a `namespace:` field by changing it from a core.v1.LocalObjectReference
+ to a core.v1.SecretReference, not by adopting the `{name}.{namespace}`
+ notation."
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ registration:
+ description: This is normally set automatically
+ type: string
+ type: object
+ ambassador_id:
+ description: Common to all Ambassador objects (and optional).
+ items:
+ type: string
+ type: array
+ hostname:
+ description: Hostname by which the Ambassador can be reached.
+ type: string
+ previewUrl:
+ description: Configuration for the Preview URL feature of Service
+ Preview. Defaults to preview URLs not enabled.
+ properties:
+ enabled:
+ description: Is the Preview URL feature enabled?
+ type: boolean
+ type:
+ description: What type of Preview URL is allowed?
+ enum:
+ - Path
+ type: string
+ type: object
+ requestPolicy:
+ description: Request policy definition.
+ properties:
+ insecure:
+ properties:
+ action:
+ enum:
+ - Redirect
+ - Reject
+ - Route
+ type: string
+ additionalPort:
+ type: integer
+ type: object
+ type: object
+ selector:
+ description: Selector by which we can find further configuration.
+ Defaults to hostname=$hostname
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the key
+ and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to
+ a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ tls:
+ description: TLS configuration. It is not valid to specify both `tlsContext`
+ and `tls`.
+ properties:
+ alpn_protocols:
+ type: string
+ ca_secret:
+ type: string
+ cacert_chain_file:
+ type: string
+ cert_chain_file:
+ type: string
+ cert_required:
+ type: boolean
+ cipher_suites:
+ items:
+ type: string
+ type: array
+ ecdh_curves:
+ items:
+ type: string
+ type: array
+ max_tls_version:
+ type: string
+ min_tls_version:
+ type: string
+ private_key_file:
+ type: string
+ redirect_cleartext_from:
+ type: integer
+ sni:
+ type: string
+ v3CRLSecret:
+ type: string
+ type: object
+ tlsContext:
+ description: "Name of the TLSContext the Host resource is linked with.
+ It is not valid to specify both `tlsContext` and `tls`. \n Note
+ that this is a native-Kubernetes-style core.v1.LocalObjectReference,
+ not an Ambassador-style `{name}.{namespace}` string. Because we're
+ opinionated, it does not support referencing a Secret in another
+ namespace (because most native Kubernetes resources don't support
+ that), but if we ever abandon that opinion and decide to support
+                  non-local references, it would be by adding a `namespace:` field
+ by changing it from a core.v1.LocalObjectReference to a core.v1.SecretReference,
+ not by adopting the `{name}.{namespace}` notation."
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ tlsSecret:
+ description: Name of the Kubernetes secret into which to save generated
+ certificates. If ACME is enabled (see $acmeProvider), then the
+ default is $hostname; otherwise the default is "". If the value
+ is "", then we do not do TLS for this Host.
+ properties:
+ name:
+ description: name is unique within a namespace to reference a
+ secret resource.
+ type: string
+ namespace:
+ description: namespace defines the space within which the secret
+ name must be unique.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ description: HostStatus defines the observed state of Host
+ properties:
+ errorBackoff:
+ type: string
+ errorReason:
+ description: errorReason, errorTimestamp, and errorBackoff are valid
+ when state==Error.
+ type: string
+ errorTimestamp:
+ format: date-time
+ type: string
+ phaseCompleted:
+ description: phaseCompleted and phasePending are valid when state==Pending
+ or state==Error.
+ enum:
+ - NA
+ - DefaultsFilled
+ - ACMEUserPrivateKeyCreated
+ - ACMEUserRegistered
+ - ACMECertificateChallenge
+ type: string
+ phasePending:
+ description: phaseCompleted and phasePending are valid when state==Pending
+ or state==Error.
+ enum:
+ - NA
+ - DefaultsFilled
+ - ACMEUserPrivateKeyCreated
+ - ACMEUserRegistered
+ - ACMECertificateChallenge
+ type: string
+ state:
+ enum:
+ - Initial
+ - Pending
+ - Ready
+ - Error
+ type: string
+ tlsCertificateSource:
+ enum:
+ - Unknown
+ - None
+ - Other
+ - ACME
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - jsonPath: .spec.hostname
+ name: Hostname
+ type: string
+ - jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .status.phaseCompleted
+ name: Phase Completed
+ type: string
+ - jsonPath: .status.phasePending
+ name: Phase Pending
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v3alpha1
+ schema:
+ openAPIV3Schema:
+ description: Host is the Schema for the hosts API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HostSpec defines the desired state of Host
+ properties:
+ acmeProvider:
+ description: Specifies whether/who to talk ACME with to automatically
+ manage the $tlsSecret.
+ properties:
+ authority:
+ description: Specifies who to talk ACME with to get certs. Defaults
+ to Let's Encrypt; if "none" (case-insensitive), do not try to
+ do ACME for this Host.
+ type: string
+ email:
+ type: string
+ privateKeySecret:
+ description: "Specifies the Kubernetes Secret to use to store
+ the private key of the ACME account (essentially, where to store
+ the auto-generated password for the auto-created ACME account).
+ \ You should not normally need to set this--the default value
+ is based on a combination of the ACME authority being registered
+ wit and the email address associated with the account. \n Note
+ that this is a native-Kubernetes-style core.v1.LocalObjectReference,
+ not an Ambassador-style `{name}.{namespace}` string. Because
+ we're opinionated, it does not support referencing a Secret
+ in another namespace (because most native Kubernetes resources
+ don't support that), but if we ever abandon that opinion and
+ decide to support non-local references it, it would be by adding
+ a `namespace:` field by changing it from a core.v1.LocalObjectReference
+ to a core.v1.SecretReference, not by adopting the `{name}.{namespace}`
+ notation."
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ registration:
+ description: This is normally set automatically
+ type: string
+ type: object
+ ambassador_id:
+ description: Common to all Ambassador objects (and optional).
+ items:
+ type: string
+ type: array
+ hostname:
+ description: Hostname by which the Ambassador can be reached.
+ type: string
+ mappingSelector:
+ description: Selector for Mappings we'll associate with this Host.
+ At the moment, Selector and MappingSelector are synonyms, but that
+ will change soon.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the key
+ and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to
+ a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ previewUrl:
+ description: Configuration for the Preview URL feature of Service
+ Preview. Defaults to preview URLs not enabled.
+ properties:
+ enabled:
+ description: Is the Preview URL feature enabled?
+ type: boolean
+ type:
+ description: What type of Preview URL is allowed?
+ enum:
+ - Path
+ type: string
+ type: object
+ requestPolicy:
+ description: Request policy definition.
+ properties:
+ insecure:
+ properties:
+ action:
+ enum:
+ - Redirect
+ - Reject
+ - Route
+ type: string
+ additionalPort:
+ type: integer
+ type: object
+ type: object
+ selector:
+ description: 'DEPRECATED: Selector by which we can find further configuration.
+ Use MappingSelector instead.'
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the key
+ and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to
+ a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ tls:
+ description: TLS configuration. It is not valid to specify both `tlsContext`
+ and `tls`.
+ properties:
+ alpn_protocols:
+ type: string
+ ca_secret:
+ type: string
+ cacert_chain_file:
+ type: string
+ cert_chain_file:
+ type: string
+ cert_required:
+ type: boolean
+ cipher_suites:
+ items:
+ type: string
+ type: array
+ crl_secret:
+ type: string
+ ecdh_curves:
+ items:
+ type: string
+ type: array
+ max_tls_version:
+ type: string
+ min_tls_version:
+ type: string
+ private_key_file:
+ type: string
+ redirect_cleartext_from:
+ type: integer
+ sni:
+ type: string
+ type: object
+ tlsContext:
+ description: "Name of the TLSContext the Host resource is linked with.
+ It is not valid to specify both `tlsContext` and `tls`. \n Note
+ that this is a native-Kubernetes-style core.v1.LocalObjectReference,
+ not an Ambassador-style `{name}.{namespace}` string. Because we're
+ opinionated, it does not support referencing a Secret in another
+ namespace (because most native Kubernetes resources don't support
+ that), but if we ever abandon that opinion and decide to support
+ non-local references it, it would be by adding a `namespace:` field
+ by changing it from a core.v1.LocalObjectReference to a core.v1.SecretReference,
+ not by adopting the `{name}.{namespace}` notation."
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ tlsSecret:
+ description: Name of the Kubernetes secret into which to save generated
+ certificates. If ACME is enabled (see $acmeProvider), then the
+ default is $hostname; otherwise the default is "". If the value
+ is "", then we do not do TLS for this Host.
+ properties:
+ name:
+ description: name is unique within a namespace to reference a
+ secret resource.
+ type: string
+ namespace:
+ description: namespace defines the space within which the secret
+ name must be unique.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ status:
+ description: HostStatus defines the observed state of Host
+ properties:
+ errorBackoff:
+ type: string
+ errorReason:
+ description: errorReason, errorTimestamp, and errorBackoff are valid
+ when state==Error.
+ type: string
+ errorTimestamp:
+ format: date-time
+ type: string
+ phaseCompleted:
+ description: phaseCompleted and phasePending are valid when state==Pending
+ or state==Error.
+ enum:
+ - NA
+ - DefaultsFilled
+ - ACMEUserPrivateKeyCreated
+ - ACMEUserRegistered
+ - ACMECertificateChallenge
+ type: string
+ phasePending:
+ description: phaseCompleted and phasePending are valid when state==Pending
+ or state==Error.
+ enum:
+ - NA
+ - DefaultsFilled
+ - ACMEUserPrivateKeyCreated
+ - ACMEUserRegistered
+ - ACMECertificateChallenge
+ type: string
+ state:
+ enum:
+ - Initial
+ - Pending
+ - Ready
+ - Error
+ type: string
+ tlsCertificateSource:
+ enum:
+ - Unknown
+ - None
+ - Other
+ - ACME
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
diff --git a/charts/k8s-gerrit/operator/src/main/resources/log4j2.xml b/charts/k8s-gerrit/operator/src/main/resources/log4j2.xml
new file mode 100644
index 0000000..f3dd273
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/main/resources/log4j2.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Configuration status="INFO">
+ <Appenders>
+ <Console name="Console" target="SYSTEM_OUT">
+ <PatternLayout pattern="%d{HH:mm:ss.SSS} [%-5level] %c:%L [PID:%pid] - %msg%n"/>
+ </Console>
+ </Appenders>
+ <Loggers>
+ <Root level="info">
+ <AppenderRef ref="Console"/>
+ </Root>
+ </Loggers>
+</Configuration>
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/cluster/GerritClusterE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/cluster/GerritClusterE2E.java
new file mode 100644
index 0000000..b73f463
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/cluster/GerritClusterE2E.java
@@ -0,0 +1,74 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.notNullValue;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.cluster.dependent.NfsIdmapdConfigMap;
+import com.google.gerrit.k8s.operator.cluster.dependent.SharedPVC;
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.test.AbstractGerritOperatorE2ETest;
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
+import org.junit.jupiter.api.Test;
+
+public class GerritClusterE2E extends AbstractGerritOperatorE2ETest {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ @Test
+ void testSharedPvcCreated() {
+ logger.atInfo().log("Waiting max 1 minutes for the shared pvc to be created.");
+ await()
+ .atMost(1, MINUTES)
+ .untilAsserted(
+ () -> {
+ PersistentVolumeClaim pvc =
+ client
+ .persistentVolumeClaims()
+ .inNamespace(operator.getNamespace())
+ .withName(SharedPVC.SHARED_PVC_NAME)
+ .get();
+ assertThat(pvc, is(notNullValue()));
+ });
+ }
+
+ @Test
+ void testNfsIdmapdConfigMapCreated() {
+ gerritCluster.setNfsEnabled(true);
+ logger.atInfo().log("Waiting max 1 minutes for the nfs idmapd configmap to be created.");
+ await()
+ .atMost(1, MINUTES)
+ .untilAsserted(
+ () -> {
+ ConfigMap cm =
+ client
+ .configMaps()
+ .inNamespace(operator.getNamespace())
+ .withName(NfsIdmapdConfigMap.NFS_IDMAPD_CM_NAME)
+ .get();
+ assertThat(cm, is(notNullValue()));
+ });
+ }
+
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.INGRESS;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/cluster/GerritRepositoryConfigTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/cluster/GerritRepositoryConfigTest.java
new file mode 100644
index 0000000..5474780
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/cluster/GerritRepositoryConfigTest.java
@@ -0,0 +1,46 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.cluster;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritRepositoryConfig;
+import org.junit.jupiter.api.Test;
+
+public class GerritRepositoryConfigTest {
+
+ @Test
+ public void testFullImageNameComputesCorrectly() {
+ assertThat(
+ new GerritRepositoryConfig().getFullImageName("gerrit"),
+ is(equalTo("docker.io/k8sgerrit/gerrit:latest")));
+
+ GerritRepositoryConfig repoConfig1 = new GerritRepositoryConfig();
+ repoConfig1.setOrg("testorg");
+ repoConfig1.setRegistry("registry.example.com");
+ repoConfig1.setTag("v1.0");
+ assertThat(
+ repoConfig1.getFullImageName("gerrit"),
+ is(equalTo("registry.example.com/testorg/gerrit:v1.0")));
+
+ GerritRepositoryConfig repoConfig2 = new GerritRepositoryConfig();
+ repoConfig2.setOrg(null);
+ repoConfig2.setRegistry(null);
+ repoConfig2.setTag(null);
+ assertThat(repoConfig2.getFullImageName("gerrit"), is(equalTo("gerrit")));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/ClusterManagedGerritWithIngressE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/ClusterManagedGerritWithIngressE2E.java
new file mode 100644
index 0000000..c848aa9
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/ClusterManagedGerritWithIngressE2E.java
@@ -0,0 +1,140 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit;
+
+import static com.google.gerrit.k8s.operator.network.ingress.dependent.GerritClusterIngress.INGRESS_NAME;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.api.GerritApi;
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.test.AbstractGerritOperatorE2ETest;
+import com.google.gerrit.k8s.operator.test.TestGerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import io.fabric8.kubernetes.api.model.networking.v1.Ingress;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressLoadBalancerIngress;
+import io.fabric8.kubernetes.api.model.networking.v1.IngressStatus;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+public class ClusterManagedGerritWithIngressE2E extends AbstractGerritOperatorE2ETest {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ @Test
+ void testPrimaryGerritIsCreated() throws Exception {
+ TestGerrit gerrit =
+ new TestGerrit(client, testProps, GerritMode.PRIMARY, "gerrit", operator.getNamespace());
+ GerritTemplate gerritTemplate = gerrit.createGerritTemplate();
+ gerritCluster.addGerrit(gerritTemplate);
+ gerritCluster.deploy();
+
+ logger.atInfo().log("Waiting max 2 minutes for the Ingress to have an external IP.");
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ Ingress ingress =
+ client
+ .network()
+ .v1()
+ .ingresses()
+ .inNamespace(operator.getNamespace())
+ .withName(INGRESS_NAME)
+ .get();
+ assertThat(ingress, is(notNullValue()));
+ IngressStatus status = ingress.getStatus();
+ assertThat(status, is(notNullValue()));
+ List<IngressLoadBalancerIngress> lbIngresses = status.getLoadBalancer().getIngress();
+ assertThat(lbIngresses, hasSize(1));
+ assertThat(lbIngresses.get(0).getIp(), is(notNullValue()));
+ });
+
+ GerritApi gerritApi = gerritCluster.getGerritApiClient(gerritTemplate, IngressType.INGRESS);
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertDoesNotThrow(() -> gerritApi.config().server().getVersion());
+ assertThat(gerritApi.config().server().getVersion(), notNullValue());
+ assertThat(gerritApi.config().server().getVersion(), not(is("<2.8")));
+ logger.atInfo().log("Gerrit version: %s", gerritApi.config().server().getVersion());
+ });
+ }
+
+ @Test
+ void testGerritReplicaIsCreated() throws Exception {
+ String gerritName = "gerrit-replica";
+ TestGerrit gerrit =
+ new TestGerrit(client, testProps, GerritMode.REPLICA, gerritName, operator.getNamespace());
+ gerritCluster.addGerrit(gerrit.createGerritTemplate());
+ gerritCluster.deploy();
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review [replica]"));
+ }
+
+ @Test
+ void testGerritReplicaAndPrimaryGerritAreCreated() throws Exception {
+ String primaryGerritName = "gerrit";
+ TestGerrit primaryGerrit =
+ new TestGerrit(
+ client, testProps, GerritMode.PRIMARY, primaryGerritName, operator.getNamespace());
+ gerritCluster.addGerrit(primaryGerrit.createGerritTemplate());
+ String gerritReplicaName = "gerrit-replica";
+ TestGerrit gerritReplica =
+ new TestGerrit(
+ client, testProps, GerritMode.REPLICA, gerritReplicaName, operator.getNamespace());
+ gerritCluster.addGerrit(gerritReplica.createGerritTemplate());
+ gerritCluster.deploy();
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(primaryGerritName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review"));
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritReplicaName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review [replica]"));
+ }
+
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.INGRESS;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/ClusterManagedGerritWithIstioE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/ClusterManagedGerritWithIstioE2E.java
new file mode 100644
index 0000000..fdf5ccb
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/ClusterManagedGerritWithIstioE2E.java
@@ -0,0 +1,144 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.api.GerritApi;
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.test.AbstractGerritOperatorE2ETest;
+import com.google.gerrit.k8s.operator.test.TestGerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import org.junit.jupiter.api.Test;
+
+public class ClusterManagedGerritWithIstioE2E extends AbstractGerritOperatorE2ETest {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ @Test
+ void testPrimaryGerritWithIstio() throws Exception {
+ GerritTemplate gerrit =
+ new TestGerrit(client, testProps, GerritMode.PRIMARY, "gerrit", operator.getNamespace())
+ .createGerritTemplate();
+ gerritCluster.addGerrit(gerrit);
+ gerritCluster.deploy();
+
+ GerritApi gerritApi = gerritCluster.getGerritApiClient(gerrit, IngressType.ISTIO);
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertDoesNotThrow(() -> gerritApi.config().server().getVersion());
+ assertThat(gerritApi.config().server().getVersion(), notNullValue());
+ assertThat(gerritApi.config().server().getVersion(), not(is("<2.8")));
+ logger.atInfo().log("Gerrit version: %s", gerritApi.config().server().getVersion());
+ });
+ }
+
+ @Test
+ void testGerritReplicaIsCreated() throws Exception {
+ String gerritName = "gerrit-replica";
+ TestGerrit gerrit =
+ new TestGerrit(client, testProps, GerritMode.REPLICA, gerritName, operator.getNamespace());
+ gerritCluster.addGerrit(gerrit.createGerritTemplate());
+ gerritCluster.deploy();
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review [replica]"));
+ }
+
+ @Test
+ void testMultipleGerritReplicaAreCreated() throws Exception {
+ String gerritName = "gerrit-replica-1";
+ TestGerrit gerrit =
+ new TestGerrit(client, testProps, GerritMode.REPLICA, gerritName, operator.getNamespace());
+ gerritCluster.addGerrit(gerrit.createGerritTemplate());
+ String gerritName2 = "gerrit-replica-2";
+ TestGerrit gerrit2 =
+ new TestGerrit(client, testProps, GerritMode.REPLICA, gerritName2, operator.getNamespace());
+ gerritCluster.addGerrit(gerrit2.createGerritTemplate());
+ gerritCluster.deploy();
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review [replica]"));
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritName2 + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review [replica]"));
+ }
+
+ @Test
+ void testGerritReplicaAndPrimaryGerritAreCreated() throws Exception {
+ String primaryGerritName = "gerrit";
+ TestGerrit primaryGerrit =
+ new TestGerrit(
+ client, testProps, GerritMode.PRIMARY, primaryGerritName, operator.getNamespace());
+ gerritCluster.addGerrit(primaryGerrit.createGerritTemplate());
+ String gerritReplicaName = "gerrit-replica";
+ TestGerrit gerritReplica =
+ new TestGerrit(
+ client, testProps, GerritMode.REPLICA, gerritReplicaName, operator.getNamespace());
+ gerritCluster.addGerrit(gerritReplica.createGerritTemplate());
+ gerritCluster.deploy();
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(primaryGerritName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review"));
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritReplicaName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review [replica]"));
+ }
+
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.ISTIO;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/GerritConfigReconciliationE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/GerritConfigReconciliationE2E.java
new file mode 100644
index 0000000..342d524
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/GerritConfigReconciliationE2E.java
@@ -0,0 +1,163 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.awaitility.Awaitility.await;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.test.AbstractGerritOperatorE2ETest;
+import com.google.gerrit.k8s.operator.test.TestGerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.HttpSshServiceConfig;
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+public class GerritConfigReconciliationE2E extends AbstractGerritOperatorE2ETest {
+ private static final String GERRIT_NAME = "gerrit";
+ private static final String RESTART_ANNOTATION = "kubectl.kubernetes.io/restartedAt";
+
+ private GerritTemplate gerritTemplate;
+
+ @BeforeEach
+ public void setupGerrit() throws Exception {
+ TestGerrit gerrit =
+ new TestGerrit(client, testProps, GerritMode.PRIMARY, GERRIT_NAME, operator.getNamespace());
+ gerritTemplate = gerrit.createGerritTemplate();
+ gerritCluster.addGerrit(gerritTemplate);
+ gerritCluster.deploy();
+ }
+
+ @Test
+ void testNoRestartIfGerritConfigUnchanged() throws Exception {
+ Map<String, String> annotations = getReplicaSetAnnotations();
+ assertFalse(annotations.containsKey(RESTART_ANNOTATION));
+
+ gerritCluster.removeGerrit(gerritTemplate);
+ GerritTemplateSpec gerritSpec = gerritTemplate.getSpec();
+ HttpSshServiceConfig gerritServiceConfig = new HttpSshServiceConfig();
+ gerritServiceConfig.setHttpPort(48080);
+ gerritSpec.setService(gerritServiceConfig);
+ gerritTemplate.setSpec(gerritSpec);
+ gerritCluster.addGerrit(gerritTemplate);
+ gerritCluster.deploy();
+
+ await()
+ .atMost(30, SECONDS)
+ .untilAsserted(
+ () -> {
+ assertTrue(
+ client
+ .services()
+ .inNamespace(operator.getNamespace())
+ .withName(GERRIT_NAME)
+ .get()
+ .getSpec()
+ .getPorts()
+ .stream()
+ .allMatch(p -> p.getPort() == 48080));
+ assertFalse(getReplicaSetAnnotations().containsKey(RESTART_ANNOTATION));
+ });
+ }
+
+ @Test
+ void testRestartOnGerritConfigMapChange() throws Exception {
+ String podV1Uid =
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(GERRIT_NAME + "-0")
+ .get()
+ .getMetadata()
+ .getUid();
+
+ gerritCluster.removeGerrit(gerritTemplate);
+ GerritTemplateSpec gerritSpec = gerritTemplate.getSpec();
+ Map<String, String> cfgs = new HashMap<>();
+ cfgs.putAll(gerritSpec.getConfigFiles());
+ cfgs.put("test.config", "[test]\n test");
+ gerritSpec.setConfigFiles(cfgs);
+ gerritTemplate.setSpec(gerritSpec);
+ gerritCluster.addGerrit(gerritTemplate);
+ gerritCluster.deploy();
+
+ assertGerritRestart(podV1Uid);
+ }
+
+ @Test
+ void testRestartOnGerritSecretChange() throws Exception {
+ String podV1Uid =
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(GERRIT_NAME + "-0")
+ .get()
+ .getMetadata()
+ .getUid();
+
+ secureConfig.modify("test", "test", "test");
+
+ assertGerritRestart(podV1Uid);
+ }
+
+ private void assertGerritRestart(String uidOld) {
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(GERRIT_NAME + "-0")
+ .isReady());
+ assertTrue(getReplicaSetAnnotations().containsKey(RESTART_ANNOTATION));
+ assertFalse(
+ uidOld.equals(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(GERRIT_NAME + "-0")
+ .get()
+ .getMetadata()
+ .getUid()));
+ });
+ }
+
+ private Map<String, String> getReplicaSetAnnotations() {
+ return client
+ .apps()
+ .statefulSets()
+ .inNamespace(operator.getNamespace())
+ .withName(GERRIT_NAME)
+ .get()
+ .getSpec()
+ .getTemplate()
+ .getMetadata()
+ .getAnnotations();
+ }
+
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.INGRESS;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/StandaloneGerritE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/StandaloneGerritE2E.java
new file mode 100644
index 0000000..b7359ab
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/StandaloneGerritE2E.java
@@ -0,0 +1,64 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.test.AbstractGerritOperatorE2ETest;
+import com.google.gerrit.k8s.operator.test.TestGerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import org.junit.jupiter.api.Test;
+
+public class StandaloneGerritE2E extends AbstractGerritOperatorE2ETest {
+
+ @Test
+ void testPrimaryGerritIsCreated() throws Exception {
+ String gerritName = "gerrit";
+ TestGerrit testGerrit = new TestGerrit(client, testProps, gerritName, operator.getNamespace());
+ testGerrit.deploy();
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review"));
+ }
+
+ @Test
+ void testGerritReplicaIsCreated() throws Exception {
+ String gerritName = "gerrit-replica";
+ TestGerrit testGerrit =
+ new TestGerrit(client, testProps, GerritMode.REPLICA, gerritName, operator.getNamespace());
+ testGerrit.deploy();
+
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(operator.getNamespace())
+ .withName(gerritName + "-0")
+ .inContainer("gerrit")
+ .getLog()
+ .contains("Gerrit Code Review [replica]"));
+ }
+
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.INGRESS;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/config/GerritConfigBuilderTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/config/GerritConfigBuilderTest.java
new file mode 100644
index 0000000..cedcebb
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gerrit/config/GerritConfigBuilderTest.java
@@ -0,0 +1,85 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gerrit.config;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.IngressConfig;
+import com.google.gerrit.k8s.operator.v1alpha.gerrit.config.GerritConfigBuilder;
+import java.util.Map;
+import java.util.Set;
+import org.assertj.core.util.Arrays;
+import org.eclipse.jgit.lib.Config;
+import org.junit.jupiter.api.Test;
+
+public class GerritConfigBuilderTest {
+
+ @Test
+ public void emptyGerritConfigContainsAllPresetConfiguration() {
+ Gerrit gerrit = createGerrit("");
+ ConfigBuilder cfgBuilder = new GerritConfigBuilder(gerrit);
+ Config cfg = cfgBuilder.build();
+ for (RequiredOption<?> opt : cfgBuilder.getRequiredOptions()) {
+ if (opt.getExpected() instanceof String || opt.getExpected() instanceof Boolean) {
+ assertTrue(
+ cfg.getString(opt.getSection(), opt.getSubSection(), opt.getKey())
+ .equals(opt.getExpected().toString()));
+ } else if (opt.getExpected() instanceof Set) {
+ assertTrue(
+ Arrays.asList(cfg.getStringList(opt.getSection(), opt.getSubSection(), opt.getKey()))
+ .containsAll((Set<?>) opt.getExpected()));
+ }
+ }
+ }
+
+ @Test
+ public void invalidConfigValueIsRejected() {
+ Gerrit gerrit = createGerrit("[gerrit]\n basePath = invalid");
+ assertThrows(IllegalStateException.class, () -> new GerritConfigBuilder(gerrit).build());
+ }
+
+ @Test
+ public void validConfigValueIsAccepted() {
+ Gerrit gerrit = createGerrit("[gerrit]\n basePath = git");
+ assertDoesNotThrow(() -> new GerritConfigBuilder(gerrit).build());
+ }
+
+ @Test
+ public void canonicalWebUrlIsConfigured() {
+ IngressConfig ingressConfig = new IngressConfig();
+ ingressConfig.setEnabled(true);
+ ingressConfig.setHost("gerrit.example.com");
+
+ GerritSpec gerritSpec = new GerritSpec();
+ gerritSpec.setIngress(ingressConfig);
+ Gerrit gerrit = new Gerrit();
+ gerrit.setSpec(gerritSpec);
+ Config cfg = new GerritConfigBuilder(gerrit).build();
+ assertTrue(
+ cfg.getString("gerrit", null, "canonicalWebUrl").equals("http://gerrit.example.com"));
+ }
+
+ private Gerrit createGerrit(String configText) {
+ GerritSpec gerritSpec = new GerritSpec();
+ gerritSpec.setConfigFiles(Map.of("gerrit.config", configText));
+ Gerrit gerrit = new Gerrit();
+ gerrit.setSpec(gerritSpec);
+ return gerrit;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionE2E.java
new file mode 100644
index 0000000..7f31ac2
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/gitgc/GitGarbageCollectionE2E.java
@@ -0,0 +1,234 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.gitgc;
+
+import static com.google.gerrit.k8s.operator.test.TestGerritCluster.CLUSTER_NAME;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.test.AbstractGerritOperatorE2ETest;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollection;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollectionSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollectionStatus;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.batch.v1.CronJob;
+import io.fabric8.kubernetes.api.model.batch.v1.Job;
+import java.util.List;
+import java.util.Set;
+import org.junit.jupiter.api.Test;
+
+public class GitGarbageCollectionE2E extends AbstractGerritOperatorE2ETest {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ static final String GITGC_SCHEDULE = "*/1 * * * *";
+
+ @Test
+ void testGitGcAllProjectsCreationAndDeletion() {
+ GitGarbageCollection gitGc = createCompleteGc();
+
+ logger.atInfo().log("Waiting max 2 minutes for GitGc to be created.");
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertGitGcCreation(gitGc.getMetadata().getName());
+ assertGitGcCronJobCreation(gitGc.getMetadata().getName());
+ assertGitGcJobCreation(gitGc.getMetadata().getName());
+ });
+
+ logger.atInfo().log("Deleting test GitGc object: %s", gitGc);
+ client.resource(gitGc).delete();
+ awaitGitGcDeletionAssertion(gitGc.getMetadata().getName());
+ }
+
+ @Test
+ void testGitGcSelectedProjects() {
+ GitGarbageCollection gitGc = createSelectiveGc("selective-gc", Set.of("All-Projects", "test"));
+
+ logger.atInfo().log("Waiting max 2 minutes for GitGc to be created.");
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertGitGcCreation(gitGc.getMetadata().getName());
+ assertGitGcCronJobCreation(gitGc.getMetadata().getName());
+ assertGitGcJobCreation(gitGc.getMetadata().getName());
+ });
+
+ client.resource(gitGc).delete();
+ }
+
+ @Test
+ void testSelectiveGcIsExcludedFromCompleteGc() {
+ GitGarbageCollection completeGitGc = createCompleteGc();
+
+ logger.atInfo().log("Waiting max 2 minutes for GitGc to be created.");
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertGitGcCreation(completeGitGc.getMetadata().getName());
+ assertGitGcCronJobCreation(completeGitGc.getMetadata().getName());
+ });
+
+ Set<String> selectedProjects = Set.of("All-Projects", "test");
+ GitGarbageCollection selectiveGitGc = createSelectiveGc("selective-gc", selectedProjects);
+
+ logger.atInfo().log("Waiting max 2 minutes for GitGc to be created.");
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertGitGcCreation(selectiveGitGc.getMetadata().getName());
+ assertGitGcCronJobCreation(selectiveGitGc.getMetadata().getName());
+ });
+
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ GitGarbageCollection updatedCompleteGitGc =
+ client
+ .resources(GitGarbageCollection.class)
+ .inNamespace(operator.getNamespace())
+ .withName(completeGitGc.getMetadata().getName())
+ .get();
+                  assertThat(
+                      updatedCompleteGitGc.getStatus().getExcludedProjects()
+                          .containsAll(selectedProjects),
+                      is(true));
+ });
+
+ client.resource(selectiveGitGc).delete();
+ awaitGitGcDeletionAssertion(selectiveGitGc.getMetadata().getName());
+
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ GitGarbageCollection updatedCompleteGitGc =
+ client
+ .resources(GitGarbageCollection.class)
+ .inNamespace(operator.getNamespace())
+ .withName(completeGitGc.getMetadata().getName())
+ .get();
+                  assertThat(updatedCompleteGitGc.getStatus().getExcludedProjects().isEmpty(), is(true));
+ });
+ }
+
+ private GitGarbageCollection createCompleteGc() {
+ GitGarbageCollection gitGc = new GitGarbageCollection();
+ gitGc.setMetadata(
+ new ObjectMetaBuilder()
+ .withName("gitgc-complete")
+ .withNamespace(operator.getNamespace())
+ .build());
+ GitGarbageCollectionSpec spec = new GitGarbageCollectionSpec();
+ spec.setSchedule(GITGC_SCHEDULE);
+ spec.setCluster(CLUSTER_NAME);
+ gitGc.setSpec(spec);
+
+ logger.atInfo().log("Creating test GitGc object: %s", gitGc);
+ client.resource(gitGc).createOrReplace();
+
+ return gitGc;
+ }
+
+ private GitGarbageCollection createSelectiveGc(String name, Set<String> projects) {
+ GitGarbageCollection gitGc = new GitGarbageCollection();
+ gitGc.setMetadata(
+ new ObjectMetaBuilder().withName(name).withNamespace(operator.getNamespace()).build());
+ GitGarbageCollectionSpec spec = new GitGarbageCollectionSpec();
+ spec.setSchedule(GITGC_SCHEDULE);
+ spec.setCluster(CLUSTER_NAME);
+ spec.setProjects(projects);
+ gitGc.setSpec(spec);
+
+ logger.atInfo().log("Creating test GitGc object: %s", gitGc);
+ client.resource(gitGc).createOrReplace();
+
+ return gitGc;
+ }
+
+ private void assertGitGcCreation(String gitGcName) {
+ GitGarbageCollection updatedGitGc =
+ client
+ .resources(GitGarbageCollection.class)
+ .inNamespace(operator.getNamespace())
+ .withName(gitGcName)
+ .get();
+ assertThat(updatedGitGc, is(notNullValue()));
+ assertThat(
+ updatedGitGc.getStatus().getState(),
+ is(not(equalTo(GitGarbageCollectionStatus.GitGcState.ERROR))));
+ }
+
+ private void assertGitGcCronJobCreation(String gitGcName) {
+ CronJob cronJob =
+ client
+ .batch()
+ .v1()
+ .cronjobs()
+ .inNamespace(operator.getNamespace())
+ .withName(gitGcName)
+ .get();
+ assertThat(cronJob, is(notNullValue()));
+ }
+
+ private void awaitGitGcDeletionAssertion(String gitGcName) {
+ logger.atInfo().log("Waiting max 2 minutes for GitGc to be deleted.");
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ GitGarbageCollection updatedGitGc =
+ client
+ .resources(GitGarbageCollection.class)
+ .inNamespace(operator.getNamespace())
+ .withName(gitGcName)
+ .get();
+ assertNull(updatedGitGc);
+
+ CronJob cronJob =
+ client
+ .batch()
+ .v1()
+ .cronjobs()
+ .inNamespace(operator.getNamespace())
+ .withName(gitGcName)
+ .get();
+ assertNull(cronJob);
+ });
+ }
+
+ private void assertGitGcJobCreation(String gitGcName) {
+ List<Job> jobRuns =
+ client.batch().v1().jobs().inNamespace(operator.getNamespace()).list().getItems();
+    assertThat(jobRuns.isEmpty(), is(false));
+    assertThat(jobRuns.get(0).getMetadata().getName().startsWith(gitGcName), is(true));
+ }
+
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.NONE;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterAmbassadorTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterAmbassadorTest.java
new file mode 100644
index 0000000..3f5a9cd
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/ambassador/dependent/GerritClusterAmbassadorTest.java
@@ -0,0 +1,130 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ambassador.dependent;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.getambassador.v2.Host;
+import io.getambassador.v2.Mapping;
+import io.getambassador.v2.TLSContext;
+import io.javaoperatorsdk.operator.ReconcilerUtils;
+import java.lang.reflect.InvocationTargetException;
+import java.util.Map;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+public class GerritClusterAmbassadorTest {
+
+ @ParameterizedTest
+ @MethodSource("provideYamlManifests")
+ public void expectedGerritClusterAmbassadorComponentsCreated(
+ String inputFile, Map<String, String> expectedOutputFileNames)
+ throws ClassNotFoundException, NoSuchMethodException, InstantiationException,
+ IllegalAccessException, InvocationTargetException {
+ GerritNetwork gerritNetwork =
+ ReconcilerUtils.loadYaml(GerritNetwork.class, this.getClass(), inputFile);
+
+ for (Map.Entry<String, String> entry : expectedOutputFileNames.entrySet()) {
+ String className = entry.getKey();
+ String expectedOutputFile = entry.getValue();
+
+ Class<?> clazz = Class.forName(className);
+ Object dependentObject = clazz.getDeclaredConstructor(new Class[] {}).newInstance();
+
+ if (dependentObject instanceof MappingDependentResourceInterface) {
+ MappingDependentResourceInterface dependent =
+ (MappingDependentResourceInterface) dependentObject;
+ Mapping result = dependent.desired(gerritNetwork, null);
+ Mapping expected =
+ ReconcilerUtils.loadYaml(Mapping.class, this.getClass(), expectedOutputFile);
+ assertThat(result.getSpec()).isEqualTo(expected.getSpec());
+ } else if (dependentObject instanceof GerritClusterTLSContext) {
+ GerritClusterTLSContext dependent = (GerritClusterTLSContext) dependentObject;
+ TLSContext result = dependent.desired(gerritNetwork, null);
+ TLSContext expected =
+ ReconcilerUtils.loadYaml(TLSContext.class, this.getClass(), expectedOutputFile);
+ assertThat(result.getSpec()).isEqualTo(expected.getSpec());
+ } else if (dependentObject instanceof GerritClusterHost) {
+ GerritClusterHost dependent = (GerritClusterHost) dependentObject;
+ Host result = dependent.desired(gerritNetwork, null);
+ Host expected = ReconcilerUtils.loadYaml(Host.class, this.getClass(), expectedOutputFile);
+ assertThat(result.getSpec()).isEqualTo(expected.getSpec());
+ }
+ }
+ }
+
+ private static Stream<Arguments> provideYamlManifests() {
+ return Stream.of(
+ Arguments.of(
+ "../../gerritnetwork_primary_replica_tls.yaml",
+ Map.of(
+ GerritClusterMappingGETReplica.class.getName(),
+ "mappingGETReplica_primary_replica.yaml",
+ GerritClusterMappingPOSTReplica.class.getName(),
+ "mappingPOSTReplica_primary_replica.yaml",
+ GerritClusterMappingPrimary.class.getName(), "mappingPrimary_primary_replica.yaml",
+ GerritClusterTLSContext.class.getName(), "tlscontext.yaml")),
+ Arguments.of(
+ "../../gerritnetwork_primary_replica_tls_create_host.yaml",
+ Map.of(
+ GerritClusterMappingGETReplica.class.getName(),
+ "mappingGETReplica_primary_replica.yaml",
+ GerritClusterMappingPOSTReplica.class.getName(),
+ "mappingPOSTReplica_primary_replica.yaml",
+ GerritClusterMappingPrimary.class.getName(), "mappingPrimary_primary_replica.yaml",
+ GerritClusterTLSContext.class.getName(), "tlscontext.yaml",
+ GerritClusterHost.class.getName(), "host_with_tls.yaml")),
+ Arguments.of(
+ "../../gerritnetwork_primary_replica.yaml",
+ Map.of(
+ GerritClusterMappingGETReplica.class.getName(),
+ "mappingGETReplica_primary_replica.yaml",
+ GerritClusterMappingPOSTReplica.class.getName(),
+ "mappingPOSTReplica_primary_replica.yaml",
+ GerritClusterMappingPrimary.class.getName(),
+ "mappingPrimary_primary_replica.yaml")),
+ Arguments.of(
+ "../../gerritnetwork_primary_replica_create_host.yaml",
+ Map.of(
+ GerritClusterMappingGETReplica.class.getName(),
+ "mappingGETReplica_primary_replica.yaml",
+ GerritClusterMappingPOSTReplica.class.getName(),
+ "mappingPOSTReplica_primary_replica.yaml",
+ GerritClusterMappingPrimary.class.getName(), "mappingPrimary_primary_replica.yaml",
+ GerritClusterHost.class.getName(), "host.yaml")),
+ Arguments.of(
+ "../../gerritnetwork_primary.yaml",
+ Map.of(GerritClusterMapping.class.getName(), "mapping_primary.yaml")),
+ Arguments.of(
+ "../../gerritnetwork_replica.yaml",
+ Map.of(GerritClusterMapping.class.getName(), "mapping_replica.yaml")),
+ Arguments.of(
+ "../../gerritnetwork_receiver_replica.yaml",
+ Map.of(
+ GerritClusterMapping.class.getName(), "mapping_replica.yaml",
+ GerritClusterMappingReceiver.class.getName(), "mapping_receiver.yaml",
+ GerritClusterMappingReceiverGET.class.getName(), "mappingGET_receiver.yaml")),
+ Arguments.of(
+ "../../gerritnetwork_receiver_replica_tls.yaml",
+ Map.of(
+ GerritClusterMapping.class.getName(), "mapping_replica.yaml",
+ GerritClusterMappingReceiver.class.getName(), "mapping_receiver.yaml",
+ GerritClusterMappingReceiverGET.class.getName(), "mappingGET_receiver.yaml",
+ GerritClusterTLSContext.class.getName(), "tlscontext.yaml")));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/ingress/dependent/GerritClusterIngressTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/ingress/dependent/GerritClusterIngressTest.java
new file mode 100644
index 0000000..a2e6a24
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/ingress/dependent/GerritClusterIngressTest.java
@@ -0,0 +1,52 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.ingress.dependent;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.kubernetes.api.model.networking.v1.Ingress;
+import io.javaoperatorsdk.operator.ReconcilerUtils;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+public class GerritClusterIngressTest {
+ @ParameterizedTest
+ @MethodSource("provideYamlManifests")
+ public void expectedGerritClusterIngressCreated(String inputFile, String expectedOutputFile) {
+ GerritClusterIngress dependent = new GerritClusterIngress();
+ Ingress result =
+ dependent.desired(
+ ReconcilerUtils.loadYaml(GerritNetwork.class, this.getClass(), inputFile), null);
+ Ingress expected = ReconcilerUtils.loadYaml(Ingress.class, this.getClass(), expectedOutputFile);
+ assertThat(result.getSpec()).isEqualTo(expected.getSpec());
+ assertThat(result.getMetadata().getAnnotations())
+ .containsExactlyEntriesIn(expected.getMetadata().getAnnotations());
+ }
+
+ private static Stream<Arguments> provideYamlManifests() {
+ return Stream.of(
+ Arguments.of(
+ "../../gerritnetwork_primary_replica_tls.yaml", "ingress_primary_replica_tls.yaml"),
+ Arguments.of("../../gerritnetwork_primary_replica.yaml", "ingress_primary_replica.yaml"),
+ Arguments.of("../../gerritnetwork_primary.yaml", "ingress_primary.yaml"),
+ Arguments.of("../../gerritnetwork_replica.yaml", "ingress_replica.yaml"),
+ Arguments.of("../../gerritnetwork_receiver_replica.yaml", "ingress_receiver_replica.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_receiver_replica_tls.yaml", "ingress_receiver_replica_tls.yaml"));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritClusterIstioTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritClusterIstioTest.java
new file mode 100644
index 0000000..eaed636
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/network/istio/dependent/GerritClusterIstioTest.java
@@ -0,0 +1,88 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.network.istio.dependent;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import io.fabric8.istio.api.networking.v1beta1.Gateway;
+import io.fabric8.istio.api.networking.v1beta1.VirtualService;
+import io.javaoperatorsdk.operator.ReconcilerUtils;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+public class GerritClusterIstioTest {
+ @ParameterizedTest
+ @MethodSource("provideYamlManifests")
+ public void expectedGerritClusterIstioComponentsCreated(
+ String inputFile, String expectedGatewayOutputFile, String expectedVirtualServiceOutputFile) {
+ GerritNetwork gerritNetwork =
+ ReconcilerUtils.loadYaml(GerritNetwork.class, this.getClass(), inputFile);
+ GerritClusterIstioGateway gatewayDependent = new GerritClusterIstioGateway();
+ Gateway gatewayResult = gatewayDependent.desired(gerritNetwork, null);
+ Gateway expectedGateway =
+ ReconcilerUtils.loadYaml(Gateway.class, this.getClass(), expectedGatewayOutputFile);
+ assertThat(gatewayResult.getSpec()).isEqualTo(expectedGateway.getSpec());
+
+ GerritIstioVirtualService virtualServiceDependent = new GerritIstioVirtualService();
+ VirtualService virtualServiceResult = virtualServiceDependent.desired(gerritNetwork, null);
+ VirtualService expectedVirtualService =
+ ReconcilerUtils.loadYaml(
+ VirtualService.class, this.getClass(), expectedVirtualServiceOutputFile);
+ assertThat(virtualServiceResult.getSpec()).isEqualTo(expectedVirtualService.getSpec());
+ }
+
+ private static Stream<Arguments> provideYamlManifests() {
+ return Stream.of(
+ Arguments.of(
+ "../../gerritnetwork_primary_replica_tls.yaml",
+ "gateway_tls.yaml",
+ "virtualservice_primary_replica.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_primary_replica.yaml",
+ "gateway.yaml",
+ "virtualservice_primary_replica.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_primary.yaml", "gateway.yaml", "virtualservice_primary.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_replica.yaml", "gateway.yaml", "virtualservice_replica.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_receiver_replica.yaml",
+ "gateway.yaml",
+ "virtualservice_receiver_replica.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_receiver_replica_tls.yaml",
+ "gateway_tls.yaml",
+ "virtualservice_receiver_replica.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_primary_ssh.yaml",
+ "gateway_primary_ssh.yaml",
+ "virtualservice_primary_ssh.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_replica_ssh.yaml",
+ "gateway_replica_ssh.yaml",
+ "virtualservice_replica_ssh.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_primary_replica_ssh.yaml",
+ "gateway_primary_replica_ssh.yaml",
+ "virtualservice_primary_replica_ssh.yaml"),
+ Arguments.of(
+ "../../gerritnetwork_receiver_replica_ssh.yaml",
+ "gateway_receiver_replica_ssh.yaml",
+ "virtualservice_receiver_replica_ssh.yaml"));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/AbstractClusterManagedReceiverE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/AbstractClusterManagedReceiverE2E.java
new file mode 100644
index 0000000..557e140
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/AbstractClusterManagedReceiverE2E.java
@@ -0,0 +1,110 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.gerrit.k8s.operator.test.AbstractGerritOperatorE2ETest;
+import com.google.gerrit.k8s.operator.test.ReceiverUtil;
+import com.google.gerrit.k8s.operator.test.TestGerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplateSpec;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import java.io.File;
+import java.net.URL;
+import java.nio.file.Path;
+import org.apache.http.client.utils.URIBuilder;
+import org.eclipse.jgit.api.Git;
+import org.eclipse.jgit.revwalk.RevCommit;
+import org.eclipse.jgit.transport.CredentialsProvider;
+import org.eclipse.jgit.transport.RefSpec;
+import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+public abstract class AbstractClusterManagedReceiverE2E extends AbstractGerritOperatorE2ETest {
+ private static final String GERRIT_NAME = "gerrit";
+ private ReceiverTemplate receiver;
+ private GerritTemplate gerrit;
+
+ @BeforeEach
+ public void setupComponents() throws Exception {
+ gerrit = TestGerrit.createGerritTemplate(GERRIT_NAME, GerritMode.REPLICA);
+ gerritCluster.addGerrit(gerrit);
+
+ receiver = new ReceiverTemplate();
+ ObjectMeta receiverMeta = new ObjectMetaBuilder().withName("receiver").build();
+ receiver.setMetadata(receiverMeta);
+ ReceiverTemplateSpec receiverTemplateSpec = new ReceiverTemplateSpec();
+ receiverTemplateSpec.setReplicas(2);
+ receiverTemplateSpec.setCredentialSecretRef(ReceiverUtil.CREDENTIALS_SECRET_NAME);
+ receiver.setSpec(receiverTemplateSpec);
+ gerritCluster.setReceiver(receiver);
+ gerritCluster.deploy();
+ }
+
+ @Test
+ public void testProjectLifecycle(@TempDir Path tempDir) throws Exception {
+ GerritCluster cluster = gerritCluster.getGerritCluster();
+ assertProjectLifecycle(cluster, tempDir);
+ }
+
+ private void assertProjectLifecycle(GerritCluster cluster, Path tempDir) throws Exception {
+ assertThat(
+ ReceiverUtil.sendReceiverApiRequest(cluster, "PUT", "/a/projects/test.git"),
+ is(equalTo(201)));
+ CredentialsProvider gerritCredentials =
+ new UsernamePasswordCredentialsProvider(
+ testProps.getGerritUser(), testProps.getGerritPwd());
+ Git git =
+ Git.cloneRepository()
+ .setURI(getGerritUrl("/test.git").toString())
+ .setCredentialsProvider(gerritCredentials)
+ .setDirectory(tempDir.toFile())
+ .call();
+ new File(tempDir.toFile(), "test.txt").createNewFile();
+ git.add().addFilepattern(".").call();
+ RevCommit commit = git.commit().setMessage("test commit").call();
+ git.push()
+ .setCredentialsProvider(
+ new UsernamePasswordCredentialsProvider(
+ ReceiverUtil.RECEIVER_TEST_USER, ReceiverUtil.RECEIVER_TEST_PASSWORD))
+ .setRefSpecs(new RefSpec("refs/heads/master"))
+ .call();
+ assertTrue(
+ git.lsRemote().setCredentialsProvider(gerritCredentials).setRemote("origin").call().stream()
+ .anyMatch(ref -> ref.getObjectId().equals(commit.getId())));
+ assertThat(
+ ReceiverUtil.sendReceiverApiRequest(cluster, "DELETE", "/a/projects/test.git"),
+ is(equalTo(204)));
+ }
+
+ private URL getGerritUrl(String path) throws Exception {
+ return new URIBuilder()
+ .setScheme("https")
+ .setHost(gerritCluster.getHostname())
+ .setPath(path)
+ .build()
+ .toURL();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/ClusterManagedReceiverWithIngressE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/ClusterManagedReceiverWithIngressE2E.java
new file mode 100644
index 0000000..4dd8bd3
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/ClusterManagedReceiverWithIngressE2E.java
@@ -0,0 +1,25 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver;
+
+import com.google.gerrit.k8s.operator.network.IngressType;
+
+public class ClusterManagedReceiverWithIngressE2E extends AbstractClusterManagedReceiverE2E {
+
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.INGRESS;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/ClusterManagedReceiverWithIstioE2E.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/ClusterManagedReceiverWithIstioE2E.java
new file mode 100644
index 0000000..e76303c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/ClusterManagedReceiverWithIstioE2E.java
@@ -0,0 +1,24 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver;
+
+import com.google.gerrit.k8s.operator.network.IngressType;
+
+public class ClusterManagedReceiverWithIstioE2E extends AbstractClusterManagedReceiverE2E {
+ @Override
+ protected IngressType getIngressType() {
+ return IngressType.ISTIO;
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverTest.java
new file mode 100644
index 0000000..8698b1a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/receiver/dependent/ReceiverTest.java
@@ -0,0 +1,48 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.receiver.dependent;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.apps.Deployment;
+import io.javaoperatorsdk.operator.ReconcilerUtils;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+public class ReceiverTest {
+ @ParameterizedTest
+ @MethodSource("provideYamlManifests")
+ public void expectedReceiverComponentsCreated(
+ String inputFile, String expectedDeployment, String expectedService) {
+ Receiver input = ReconcilerUtils.loadYaml(Receiver.class, this.getClass(), inputFile);
+ ReceiverDeployment dependentDeployment = new ReceiverDeployment();
+ assertThat(dependentDeployment.desired(input, null))
+ .isEqualTo(ReconcilerUtils.loadYaml(Deployment.class, this.getClass(), expectedDeployment));
+
+ ReceiverService dependentService = new ReceiverService();
+ assertThat(dependentService.desired(input, null))
+ .isEqualTo(ReconcilerUtils.loadYaml(Service.class, this.getClass(), expectedService));
+ }
+
+ private static Stream<Arguments> provideYamlManifests() {
+ return Stream.of(
+ Arguments.of("../receiver.yaml", "deployment.yaml", "service.yaml"),
+ Arguments.of("../receiver_minimal.yaml", "deployment_minimal.yaml", "service.yaml"));
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GerritAdmissionWebhookTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GerritAdmissionWebhookTest.java
new file mode 100644
index 0000000..32b342d
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GerritAdmissionWebhookTest.java
@@ -0,0 +1,182 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.gerrit.k8s.operator.test.TestAdmissionWebhookServer;
+import com.google.gerrit.k8s.operator.v1alpha.admission.servlet.GerritAdmissionWebhook;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritClusterSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritClusterIngressConfig;
+import io.fabric8.kubernetes.api.model.DefaultKubernetesResourceList;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionRequest;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionReview;
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer;
+import io.fabric8.kubernetes.internal.KubernetesDeserializer;
+import jakarta.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Map;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.eclipse.jetty.http.HttpMethod;
+import org.eclipse.jgit.lib.Config;
+import org.junit.Rule;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.TestInstance.Lifecycle;
+
+@TestInstance(Lifecycle.PER_CLASS)
+public class GerritAdmissionWebhookTest {
+ private static final String NAMESPACE = "test";
+ private static final String LIST_GERRITS_PATH =
+ String.format(
+ "/apis/%s/namespaces/%s/%s",
+ HasMetadata.getApiVersion(Gerrit.class), NAMESPACE, HasMetadata.getPlural(Gerrit.class));
+ private static final String LIST_GERRIT_CLUSTERS_PATH =
+ String.format(
+ "/apis/%s/namespaces/%s/%s",
+ HasMetadata.getApiVersion(GerritCluster.class),
+ NAMESPACE,
+ HasMetadata.getPlural(GerritCluster.class));
+ private TestAdmissionWebhookServer server;
+
+ @Rule public KubernetesServer kubernetesServer = new KubernetesServer();
+
+ @BeforeAll
+ public void setup() throws Exception {
+ KubernetesDeserializer.registerCustomKind(
+ "gerritoperator.google.com/v1alpha2", "Gerrit", Gerrit.class);
+ KubernetesDeserializer.registerCustomKind(
+ "gerritoperator.google.com/v1alpha1", "Receiver", Receiver.class);
+ server = new TestAdmissionWebhookServer();
+
+ kubernetesServer.before();
+
+ GerritAdmissionWebhook webhook = new GerritAdmissionWebhook();
+ server.registerWebhook(webhook);
+ server.start();
+ }
+
+ @Test
+ public void testInvalidGerritConfigRejected() throws Exception {
+ String clusterName = "gerrit";
+ Config gerritConfig = new Config();
+ gerritConfig.setString("container", null, "user", "gerrit");
+ Gerrit gerrit = createGerrit(clusterName, gerritConfig);
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GERRITS_PATH)
+ .andReturn(HttpURLConnection.HTTP_OK, new DefaultKubernetesResourceList<Gerrit>())
+ .times(2);
+
+ mockGerritCluster(clusterName);
+
+ HttpURLConnection http = sendAdmissionRequest(gerrit);
+
+ AdmissionReview response =
+ new ObjectMapper().readValue(http.getInputStream(), AdmissionReview.class);
+
+ assertThat(http.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response.getResponse().getAllowed(), is(true));
+
+ gerritConfig.setString("container", null, "user", "invalid");
+ Gerrit gerrit2 = createGerrit(clusterName, gerritConfig);
+ HttpURLConnection http2 = sendAdmissionRequest(gerrit2);
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(false));
+ }
+
+ private void mockGerritCluster(String name) {
+ GerritCluster cluster = new GerritCluster();
+ cluster.setMetadata(new ObjectMetaBuilder().withName(name).withNamespace(NAMESPACE).build());
+ GerritClusterSpec clusterSpec = new GerritClusterSpec();
+ GerritClusterIngressConfig ingressConfig = new GerritClusterIngressConfig();
+ ingressConfig.setEnabled(false);
+ clusterSpec.setIngress(ingressConfig);
+ clusterSpec.setServerId("test");
+ cluster.setSpec(clusterSpec);
+
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GERRIT_CLUSTERS_PATH + "/" + name)
+ .andReturn(HttpURLConnection.HTTP_OK, cluster)
+ .always();
+ }
+
+ private Gerrit createGerrit(String cluster, Config gerritConfig) {
+ ObjectMeta meta =
+ new ObjectMetaBuilder()
+ .withName(RandomStringUtils.randomAlphabetic(10).toLowerCase())
+ .withNamespace(NAMESPACE)
+ .build();
+ GerritSpec gerritSpec = new GerritSpec();
+ gerritSpec.setMode(GerritMode.PRIMARY);
+ if (gerritConfig != null) {
+ gerritSpec.setConfigFiles(Map.of("gerrit.config", gerritConfig.toText()));
+ }
+ Gerrit gerrit = new Gerrit();
+ gerrit.setMetadata(meta);
+ gerrit.setSpec(gerritSpec);
+ return gerrit;
+ }
+
+ private HttpURLConnection sendAdmissionRequest(Gerrit gerrit)
+ throws MalformedURLException, IOException {
+ HttpURLConnection http =
+ (HttpURLConnection)
+ new URL("http://localhost:8080/admission/v1alpha/gerrit").openConnection();
+ http.setRequestMethod(HttpMethod.POST.asString());
+ http.setRequestProperty("Content-Type", "application/json");
+ http.setDoOutput(true);
+
+ AdmissionRequest admissionReq = new AdmissionRequest();
+ admissionReq.setObject(gerrit);
+ AdmissionReview admissionReview = new AdmissionReview();
+ admissionReview.setRequest(admissionReq);
+
+ try (OutputStream os = http.getOutputStream()) {
+ byte[] input = new ObjectMapper().writer().writeValueAsBytes(admissionReview);
+ os.write(input, 0, input.length);
+ }
+ return http;
+ }
+
+ @AfterAll
+ public void shutdown() throws Exception {
+ server.stop();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GerritClusterAdmissionWebhookTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GerritClusterAdmissionWebhookTest.java
new file mode 100644
index 0000000..55526c7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GerritClusterAdmissionWebhookTest.java
@@ -0,0 +1,195 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.gerrit.k8s.operator.test.ReceiverUtil;
+import com.google.gerrit.k8s.operator.test.TestAdmissionWebhookServer;
+import com.google.gerrit.k8s.operator.test.TestGerrit;
+import com.google.gerrit.k8s.operator.test.TestGerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.admission.servlet.GerritAdmissionWebhook;
+import com.google.gerrit.k8s.operator.v1alpha.admission.servlet.GerritClusterAdmissionWebhook;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplateSpec;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionRequest;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionReview;
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer;
+import io.fabric8.kubernetes.internal.KubernetesDeserializer;
+import jakarta.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.URL;
+import org.eclipse.jetty.http.HttpMethod;
+import org.eclipse.jgit.lib.Config;
+import org.junit.Rule;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.TestInstance.Lifecycle;
+
+@TestInstance(Lifecycle.PER_CLASS)
+public class GerritClusterAdmissionWebhookTest {
+ private static final String NAMESPACE = "test";
+ private TestAdmissionWebhookServer server;
+
+ @Rule public KubernetesServer kubernetesServer = new KubernetesServer();
+
+ @BeforeAll
+ public void setup() throws Exception {
+ KubernetesDeserializer.registerCustomKind(
+ "gerritoperator.google.com/v1alpha2", "Gerrit", Gerrit.class);
+ KubernetesDeserializer.registerCustomKind(
+ "gerritoperator.google.com/v1alpha2", "GerritCluster", GerritCluster.class);
+ KubernetesDeserializer.registerCustomKind(
+ "gerritoperator.google.com/v1alpha2", "Receiver", Receiver.class);
+ server = new TestAdmissionWebhookServer();
+
+ kubernetesServer.before();
+
+ server.registerWebhook(new GerritClusterAdmissionWebhook());
+ server.registerWebhook(new GerritAdmissionWebhook());
+ server.start();
+ }
+
+ @Test
+ public void testOnlySinglePrimaryGerritIsAcceptedPerGerritCluster() throws Exception {
+ Config cfg = new Config();
+ cfg.fromText(TestGerrit.DEFAULT_GERRIT_CONFIG);
+ GerritTemplate gerrit1 = TestGerrit.createGerritTemplate("gerrit1", GerritMode.PRIMARY, cfg);
+ TestGerritCluster gerritCluster =
+ new TestGerritCluster(kubernetesServer.getClient(), NAMESPACE);
+ gerritCluster.addGerrit(gerrit1);
+ GerritCluster cluster = gerritCluster.build();
+
+ HttpURLConnection http = sendAdmissionRequest(cluster);
+
+ AdmissionReview response =
+ new ObjectMapper().readValue(http.getInputStream(), AdmissionReview.class);
+
+ assertThat(http.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response.getResponse().getAllowed(), is(true));
+
+ GerritTemplate gerrit2 = TestGerrit.createGerritTemplate("gerrit2", GerritMode.PRIMARY, cfg);
+ gerritCluster.addGerrit(gerrit2);
+ HttpURLConnection http2 = sendAdmissionRequest(gerritCluster.build());
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(false));
+ assertThat(
+ response2.getResponse().getStatus().getCode(),
+ is(equalTo(HttpServletResponse.SC_CONFLICT)));
+ }
+
+ @Test
+ public void testPrimaryGerritAndReceiverAreNotAcceptedInSameGerritCluster() throws Exception {
+ Config cfg = new Config();
+ cfg.fromText(TestGerrit.DEFAULT_GERRIT_CONFIG);
+ GerritTemplate gerrit = TestGerrit.createGerritTemplate("gerrit1", GerritMode.PRIMARY, cfg);
+ TestGerritCluster gerritCluster =
+ new TestGerritCluster(kubernetesServer.getClient(), NAMESPACE);
+ gerritCluster.addGerrit(gerrit);
+
+ ReceiverTemplate receiver = new ReceiverTemplate();
+ ObjectMeta receiverMeta = new ObjectMetaBuilder().withName("receiver").build();
+ receiver.setMetadata(receiverMeta);
+ ReceiverTemplateSpec receiverTemplateSpec = new ReceiverTemplateSpec();
+ receiverTemplateSpec.setReplicas(2);
+ receiverTemplateSpec.setCredentialSecretRef(ReceiverUtil.CREDENTIALS_SECRET_NAME);
+ receiver.setSpec(receiverTemplateSpec);
+
+ gerritCluster.setReceiver(receiver);
+ HttpURLConnection http2 = sendAdmissionRequest(gerritCluster.build());
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(false));
+ assertThat(
+ response2.getResponse().getStatus().getCode(),
+ is(equalTo(HttpServletResponse.SC_CONFLICT)));
+ }
+
+ @Test
+ public void testPrimaryAndReplicaAreAcceptedInSameGerritCluster() throws Exception {
+ Config cfg = new Config();
+ cfg.fromText(TestGerrit.DEFAULT_GERRIT_CONFIG);
+ GerritTemplate gerrit1 = TestGerrit.createGerritTemplate("gerrit1", GerritMode.PRIMARY, cfg);
+ TestGerritCluster gerritCluster =
+ new TestGerritCluster(kubernetesServer.getClient(), NAMESPACE);
+ gerritCluster.addGerrit(gerrit1);
+
+ HttpURLConnection http = sendAdmissionRequest(gerritCluster.build());
+
+ AdmissionReview response =
+ new ObjectMapper().readValue(http.getInputStream(), AdmissionReview.class);
+
+ assertThat(http.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response.getResponse().getAllowed(), is(true));
+
+ GerritTemplate gerrit2 = TestGerrit.createGerritTemplate("gerrit2", GerritMode.REPLICA, cfg);
+ gerritCluster.addGerrit(gerrit2);
+ HttpURLConnection http2 = sendAdmissionRequest(gerritCluster.build());
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(true));
+ }
+
+ private HttpURLConnection sendAdmissionRequest(GerritCluster gerritCluster)
+ throws MalformedURLException, IOException {
+ HttpURLConnection http =
+ (HttpURLConnection)
+ new URL("http://localhost:8080/admission/v1alpha/gerritcluster").openConnection();
+ http.setRequestMethod(HttpMethod.POST.asString());
+ http.setRequestProperty("Content-Type", "application/json");
+ http.setDoOutput(true);
+
+ AdmissionRequest admissionReq = new AdmissionRequest();
+ admissionReq.setObject(gerritCluster);
+ AdmissionReview admissionReview = new AdmissionReview();
+ admissionReview.setRequest(admissionReq);
+
+ try (OutputStream os = http.getOutputStream()) {
+ byte[] input = new ObjectMapper().writer().writeValueAsBytes(admissionReview);
+ os.write(input, 0, input.length);
+ }
+ return http;
+ }
+
+ @AfterAll
+ public void shutdown() throws Exception {
+ server.stop();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GitGcAdmissionWebhookTest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GitGcAdmissionWebhookTest.java
new file mode 100644
index 0000000..5553e95
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/server/GitGcAdmissionWebhookTest.java
@@ -0,0 +1,256 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.server;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.gerrit.k8s.operator.test.TestAdmissionWebhookServer;
+import com.google.gerrit.k8s.operator.v1alpha.admission.servlet.GitGcAdmissionWebhook;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollection;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollectionSpec;
+import io.fabric8.kubernetes.api.model.DefaultKubernetesResourceList;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionRequest;
+import io.fabric8.kubernetes.api.model.admission.v1.AdmissionReview;
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer;
+import io.fabric8.kubernetes.internal.KubernetesDeserializer;
+import jakarta.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.List;
+import java.util.Set;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.eclipse.jetty.http.HttpMethod;
+import org.junit.Rule;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.TestInstance.Lifecycle;
+
+@TestInstance(Lifecycle.PER_CLASS)
+public class GitGcAdmissionWebhookTest {
+ private static final String NAMESPACE = "test";
+ private static final String LIST_GITGCS_PATH =
+ String.format(
+ "/apis/%s/namespaces/%s/%s",
+ HasMetadata.getApiVersion(GitGarbageCollection.class),
+ NAMESPACE,
+ HasMetadata.getPlural(GitGarbageCollection.class));
+ private TestAdmissionWebhookServer server;
+
+ @Rule public KubernetesServer kubernetesServer = new KubernetesServer();
+
+ @BeforeAll
+ public void setup() throws Exception {
+ KubernetesDeserializer.registerCustomKind(
+ "gerritoperator.google.com/v1alpha1", "GerritCluster", GerritCluster.class);
+ KubernetesDeserializer.registerCustomKind(
+ "gerritoperator.google.com/v1alpha1", "GitGarbageCollection", GitGarbageCollection.class);
+ server = new TestAdmissionWebhookServer();
+
+ kubernetesServer.before();
+
+ GitGcAdmissionWebhook webhook = new GitGcAdmissionWebhook(kubernetesServer.getClient());
+ server.registerWebhook(webhook);
+ server.start();
+ }
+
+ @Test
+ @DisplayName("Only a single GitGC that works on all projects in site is allowed.")
+ public void testOnlySingleGitGcWorkingOnAllProjectsIsAllowed() throws Exception {
+ GitGarbageCollection gitGc = createCompleteGitGc();
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GITGCS_PATH)
+ .andReturn(
+ HttpURLConnection.HTTP_OK, new DefaultKubernetesResourceList<GitGarbageCollection>())
+ .once();
+
+ HttpURLConnection http = sendAdmissionRequest(gitGc);
+
+ AdmissionReview response =
+ new ObjectMapper().readValue(http.getInputStream(), AdmissionReview.class);
+
+ assertThat(http.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response.getResponse().getAllowed(), is(true));
+
+ DefaultKubernetesResourceList<GitGarbageCollection> existingGitGcs =
+ new DefaultKubernetesResourceList<GitGarbageCollection>();
+ existingGitGcs.setItems(List.of(createCompleteGitGc()));
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GITGCS_PATH)
+ .andReturn(HttpURLConnection.HTTP_OK, existingGitGcs)
+ .once();
+
+ HttpURLConnection http2 = sendAdmissionRequest(gitGc);
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(false));
+ assertThat(
+ response2.getResponse().getStatus().getCode(),
+ is(equalTo(HttpServletResponse.SC_CONFLICT)));
+ }
+
+ @Test
+ @DisplayName(
+ "A GitGc configured to work on all projects and selective GitGcs are allowed to exist at the same time.")
+ public void testSelectiveAndCompleteGitGcAreAllowedTogether() throws Exception {
+ DefaultKubernetesResourceList<GitGarbageCollection> existingGitGcs =
+ new DefaultKubernetesResourceList<GitGarbageCollection>();
+ existingGitGcs.setItems(List.of(createCompleteGitGc()));
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GITGCS_PATH)
+ .andReturn(HttpURLConnection.HTTP_OK, existingGitGcs)
+ .once();
+
+ GitGarbageCollection gitGc2 = createGitGcForProjects(Set.of("project3"));
+ HttpURLConnection http2 = sendAdmissionRequest(gitGc2);
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(true));
+ }
+
+ @Test
+ @DisplayName("Multiple selective GitGcs working on a different set of projects are allowed.")
+ public void testNonConflictingSelectiveGcsAreAllowed() throws Exception {
+ GitGarbageCollection gitGc = createGitGcForProjects(Set.of("project1", "project2"));
+ DefaultKubernetesResourceList<GitGarbageCollection> existingGitGcs =
+ new DefaultKubernetesResourceList<GitGarbageCollection>();
+ existingGitGcs.setItems(List.of(gitGc));
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GITGCS_PATH)
+ .andReturn(HttpURLConnection.HTTP_OK, existingGitGcs)
+ .once();
+
+ GitGarbageCollection gitGc2 = createGitGcForProjects(Set.of("project3"));
+ HttpURLConnection http2 = sendAdmissionRequest(gitGc2);
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(true));
+ }
+
+ @Test
+ @DisplayName("Multiple selective GitGcs working on the same project(s) are not allowed.")
+ public void testConflictingSelectiveGcsNotAllowed() throws Exception {
+ GitGarbageCollection gitGc = createGitGcForProjects(Set.of("project1", "project2"));
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GITGCS_PATH)
+ .andReturn(
+ HttpURLConnection.HTTP_OK, new DefaultKubernetesResourceList<GitGarbageCollection>())
+ .once();
+
+ HttpURLConnection http = sendAdmissionRequest(gitGc);
+
+ AdmissionReview response =
+ new ObjectMapper().readValue(http.getInputStream(), AdmissionReview.class);
+
+ assertThat(http.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response.getResponse().getAllowed(), is(true));
+
+ DefaultKubernetesResourceList<GitGarbageCollection> existingGitGcs =
+ new DefaultKubernetesResourceList<GitGarbageCollection>();
+ existingGitGcs.setItems(List.of(gitGc));
+ kubernetesServer
+ .expect()
+ .get()
+ .withPath(LIST_GITGCS_PATH)
+ .andReturn(HttpURLConnection.HTTP_OK, existingGitGcs)
+ .once();
+
+ GitGarbageCollection gitGc2 = createGitGcForProjects(Set.of("project1"));
+ HttpURLConnection http2 = sendAdmissionRequest(gitGc2);
+
+ AdmissionReview response2 =
+ new ObjectMapper().readValue(http2.getInputStream(), AdmissionReview.class);
+
+ assertThat(http2.getResponseCode(), is(equalTo(HttpServletResponse.SC_OK)));
+ assertThat(response2.getResponse().getAllowed(), is(false));
+ assertThat(
+ response2.getResponse().getStatus().getCode(),
+ is(equalTo(HttpServletResponse.SC_CONFLICT)));
+ }
+
+ private GitGarbageCollection createCompleteGitGc() {
+ return createGitGcForProjects(Set.of());
+ }
+
+ private GitGarbageCollection createGitGcForProjects(Set<String> projects) {
+ GitGarbageCollectionSpec spec = new GitGarbageCollectionSpec();
+ spec.setProjects(projects);
+ GitGarbageCollection gitGc = new GitGarbageCollection();
+ gitGc.setMetadata(
+ new ObjectMetaBuilder()
+ .withName(RandomStringUtils.randomAlphabetic(10))
+ .withUid(RandomStringUtils.randomAlphabetic(10))
+ .withNamespace(NAMESPACE)
+ .build());
+ gitGc.setSpec(spec);
+ return gitGc;
+ }
+
+ private HttpURLConnection sendAdmissionRequest(GitGarbageCollection gitGc)
+ throws MalformedURLException, IOException {
+ HttpURLConnection http =
+ (HttpURLConnection)
+ new URL("http://localhost:8080/admission/v1alpha/gitgc").openConnection();
+ http.setRequestMethod(HttpMethod.POST.asString());
+ http.setRequestProperty("Content-Type", "application/json");
+ http.setDoOutput(true);
+
+ AdmissionRequest admissionReq = new AdmissionRequest();
+ admissionReq.setObject(gitGc);
+ AdmissionReview admissionReview = new AdmissionReview();
+ admissionReview.setRequest(admissionReq);
+
+ try (OutputStream os = http.getOutputStream()) {
+ byte[] input = new ObjectMapper().writer().writeValueAsBytes(admissionReview);
+ os.write(input, 0, input.length);
+ }
+ return http;
+ }
+
+ @AfterAll
+ public void shutdown() throws Exception {
+ server.stop();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/AbstractGerritOperatorE2ETest.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/AbstractGerritOperatorE2ETest.java
new file mode 100644
index 0000000..ee1aa01
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/AbstractGerritOperatorE2ETest.java
@@ -0,0 +1,140 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.test;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.cluster.GerritClusterReconciler;
+import com.google.gerrit.k8s.operator.gerrit.GerritReconciler;
+import com.google.gerrit.k8s.operator.gitgc.GitGarbageCollectionReconciler;
+import com.google.gerrit.k8s.operator.network.GerritNetworkReconcilerProvider;
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.receiver.ReceiverReconciler;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gitgc.GitGarbageCollection;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.network.GerritNetwork;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.Receiver;
+import io.fabric8.kubernetes.api.model.Secret;
+import io.fabric8.kubernetes.api.model.SecretBuilder;
+import io.fabric8.kubernetes.client.Config;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClientBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.junit.LocallyRunOperatorExtension;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Base64;
+import java.util.Map;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.extension.RegisterExtension;
+import org.mockito.Mockito;
+
+public abstract class AbstractGerritOperatorE2ETest {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ protected static final KubernetesClient client = getKubernetesClient();
+ public static final String IMAGE_PULL_SECRET_NAME = "image-pull-secret";
+ public static final TestProperties testProps = new TestProperties();
+
+ protected GerritReconciler gerritReconciler = Mockito.spy(new GerritReconciler(client));
+ protected TestGerritCluster gerritCluster;
+ protected TestSecureConfig secureConfig;
+ protected Secret receiverCredentials;
+
+ @RegisterExtension
+ protected LocallyRunOperatorExtension operator =
+ LocallyRunOperatorExtension.builder()
+ .withNamespaceDeleteTimeout(120)
+ .waitForNamespaceDeletion(true)
+ .withReconciler(new GerritClusterReconciler())
+ .withReconciler(gerritReconciler)
+ .withReconciler(new GitGarbageCollectionReconciler(client))
+ .withReconciler(new ReceiverReconciler(client))
+ .withReconciler(getGerritNetworkReconciler())
+ .build();
+
+ @BeforeEach
+ void setup() {
+ Mockito.reset(gerritReconciler);
+ createImagePullSecret(client, operator.getNamespace());
+
+ secureConfig = new TestSecureConfig(client, testProps, operator.getNamespace());
+ secureConfig.createOrReplace();
+
+ receiverCredentials = ReceiverUtil.createCredentialsSecret(operator.getNamespace());
+
+ client.resource(receiverCredentials).inNamespace(operator.getNamespace()).createOrReplace();
+
+ gerritCluster = new TestGerritCluster(client, operator.getNamespace());
+ gerritCluster.setIngressType(getIngressType());
+ gerritCluster.deploy();
+ }
+
+ @AfterEach
+ void cleanup() {
+ client.resources(Gerrit.class).inNamespace(operator.getNamespace()).delete();
+ client.resources(Receiver.class).inNamespace(operator.getNamespace()).delete();
+ client.resources(GitGarbageCollection.class).inNamespace(operator.getNamespace()).delete();
+ client.resources(GerritCluster.class).inNamespace(operator.getNamespace()).delete();
+ client.resource(receiverCredentials).inNamespace(operator.getNamespace()).delete();
+ }
+
+ private static KubernetesClient getKubernetesClient() {
+ Config config;
+ try {
+ String kubeconfig = System.getenv("KUBECONFIG");
+ if (kubeconfig != null) {
+ config = Config.fromKubeconfig(Files.readString(Path.of(kubeconfig)));
+ return new KubernetesClientBuilder().withConfig(config).build();
+ }
+ logger.atWarning().log("KUBECONFIG variable not set. Using default config.");
+ } catch (IOException e) {
+ logger.atSevere().withCause(e).log("Failed to load kubeconfig. Trying default");
+ }
+ return new KubernetesClientBuilder().build();
+ }
+
+ private static void createImagePullSecret(KubernetesClient client, String namespace) {
+ StringBuilder secretBuilder = new StringBuilder();
+ secretBuilder.append("{\"auths\": {\"");
+ secretBuilder.append(testProps.getRegistry());
+ secretBuilder.append("\": {\"auth\": \"");
+ secretBuilder.append(
+ Base64.getEncoder()
+ .encodeToString(
+ String.format("%s:%s", testProps.getRegistryUser(), testProps.getRegistryPwd())
+ .getBytes()));
+ secretBuilder.append("\"}}}");
+ String data = Base64.getEncoder().encodeToString(secretBuilder.toString().getBytes());
+
+ Secret imagePullSecret =
+ new SecretBuilder()
+ .withType("kubernetes.io/dockerconfigjson")
+ .withNewMetadata()
+ .withName(IMAGE_PULL_SECRET_NAME)
+ .withNamespace(namespace)
+ .endMetadata()
+ .withData(Map.of(".dockerconfigjson", data))
+ .build();
+ client.resource(imagePullSecret).createOrReplace();
+ }
+
+ public Reconciler<GerritNetwork> getGerritNetworkReconciler() {
+ return new GerritNetworkReconcilerProvider(getIngressType()).get();
+ }
+
+ protected abstract IngressType getIngressType();
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/ReceiverUtil.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/ReceiverUtil.java
new file mode 100644
index 0000000..b6cdc5a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/ReceiverUtil.java
@@ -0,0 +1,75 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.test;
+
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import io.fabric8.kubernetes.api.model.Secret;
+import io.fabric8.kubernetes.api.model.SecretBuilder;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Base64;
+import java.util.Map;
+import org.apache.commons.codec.digest.Md5Crypt;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.http.client.utils.URIBuilder;
+
+public class ReceiverUtil {
+ public static final String RECEIVER_TEST_USER = "git";
+ public static final String RECEIVER_TEST_PASSWORD = RandomStringUtils.randomAlphanumeric(32);
+ public static final String CREDENTIALS_SECRET_NAME = "receiver-secret";
+ public static final TestProperties testProps = new TestProperties();
+
+ public static int sendReceiverApiRequest(GerritCluster gerritCluster, String method, String path)
+ throws Exception {
+ URL url = getReceiverUrl(gerritCluster, path);
+
+ HttpURLConnection con = (HttpURLConnection) url.openConnection();
+ try {
+ con.setRequestMethod(method);
+ String encodedAuth =
+ Base64.getEncoder()
+ .encodeToString(
+ String.format("%s:%s", RECEIVER_TEST_USER, RECEIVER_TEST_PASSWORD)
+ .getBytes(StandardCharsets.UTF_8));
+ con.setRequestProperty("Authorization", "Basic " + encodedAuth);
+ return con.getResponseCode();
+ } finally {
+ con.disconnect();
+ }
+ }
+
+ public static URL getReceiverUrl(GerritCluster gerritCluster, String path) throws Exception {
+ return new URIBuilder()
+ .setScheme("https")
+ .setHost(gerritCluster.getSpec().getIngress().getHost())
+ .setPath(path)
+ .build()
+ .toURL();
+ }
+
+ public static Secret createCredentialsSecret(String namespace) {
+ String enPasswd = Md5Crypt.md5Crypt(RECEIVER_TEST_PASSWORD.getBytes());
+ String htpasswdContent = RECEIVER_TEST_USER + ":" + enPasswd;
+ return new SecretBuilder()
+ .withNewMetadata()
+ .withNamespace(namespace)
+ .withName(CREDENTIALS_SECRET_NAME)
+ .endMetadata()
+ .withData(
+ Map.of(".htpasswd", Base64.getEncoder().encodeToString(htpasswdContent.getBytes())))
+ .build();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestAdmissionWebhookServer.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestAdmissionWebhookServer.java
new file mode 100644
index 0000000..90c55be
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestAdmissionWebhookServer.java
@@ -0,0 +1,52 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.test;
+
+import com.google.gerrit.k8s.operator.server.AdmissionWebhookServlet;
+import org.eclipse.jetty.server.Connector;
+import org.eclipse.jetty.server.HttpConnectionFactory;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.servlet.ServletHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+
+public class TestAdmissionWebhookServer {
+ public static final String KEYSTORE_PATH = "/operator/keystore.jks";
+ public static final String KEYSTORE_PWD_FILE = "/operator/keystore.password";
+ public static final int PORT = 8080;
+
+ private final Server server = new Server();
+ private final ServletHandler servletHandler = new ServletHandler();
+
+ public void start() throws Exception {
+ HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory();
+
+ ServerConnector connector = new ServerConnector(server, httpConnectionFactory);
+ connector.setPort(PORT);
+ server.setConnectors(new Connector[] {connector});
+ server.setHandler(servletHandler);
+
+ server.start();
+ }
+
+ public void registerWebhook(AdmissionWebhookServlet webhook) {
+ servletHandler.addServletWithMapping(
+ new ServletHolder(webhook), "/admission/" + webhook.getVersion() + "/" + webhook.getName());
+ }
+
+ public void stop() throws Exception {
+ server.stop();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestGerrit.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestGerrit.java
new file mode 100644
index 0000000..5762ea5
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestGerrit.java
@@ -0,0 +1,284 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.test;
+
+import static com.google.gerrit.k8s.operator.test.TestSecureConfig.SECURE_CONFIG_SECRET_NAME;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritConfigMap;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritInitConfigMap;
+import com.google.gerrit.k8s.operator.gerrit.dependent.GerritService;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.Gerrit;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritSite;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplateSpec.GerritMode;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.ContainerImageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritRepositoryConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritStorageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.IngressConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.SharedStorage;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.StorageClassConfig;
+import io.fabric8.kubernetes.api.model.LocalObjectReference;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.Quantity;
+import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.eclipse.jgit.errors.ConfigInvalidException;
+import org.eclipse.jgit.lib.Config;
+
+public class TestGerrit {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ public static final TestProperties testProps = new TestProperties();
+ public static final String DEFAULT_GERRIT_CONFIG =
+ "[index]\n"
+ + " type = LUCENE\n"
+ + "[auth]\n"
+ + " type = LDAP\n"
+ + "[ldap]\n"
+ + " server = ldap://openldap.openldap.svc.cluster.local:1389\n"
+ + " accountBase = dc=example,dc=org\n"
+ + " username = cn=admin,dc=example,dc=org\n"
+ + "[httpd]\n"
+ + " requestLog = true\n"
+ + " gracefulStopTimeout = 1m\n"
+ + "[transfer]\n"
+ + " timeout = 120 s\n"
+ + "[user]\n"
+ + " name = Gerrit Code Review\n"
+ + " email = gerrit@example.com\n"
+ + " anonymousCoward = Unnamed User\n"
+ + "[container]\n"
+ + " javaOptions = -Xmx4g";
+
+ private final KubernetesClient client;
+ private final String name;
+ private final String namespace;
+ private final GerritMode mode;
+
+ private Gerrit gerrit = new Gerrit();
+ private Config config = defaultConfig();
+
+ public TestGerrit(
+ KubernetesClient client,
+ TestProperties testProps,
+ GerritMode mode,
+ String name,
+ String namespace) {
+ this.client = client;
+ this.mode = mode;
+ this.name = name;
+ this.namespace = namespace;
+ }
+
+ public TestGerrit(
+ KubernetesClient client, TestProperties testProps, String name, String namespace) {
+ this(client, testProps, GerritMode.PRIMARY, name, namespace);
+ }
+
+ public void build() {
+ createGerritCR();
+ }
+
+ public void deploy() {
+ build();
+ client.resource(gerrit).inNamespace(namespace).createOrReplace();
+ waitForGerritReadiness();
+ }
+
+ public void modifyGerritConfig(String section, String key, String value) {
+ config.setString(section, null, key, value);
+ }
+
+ public GerritSpec getSpec() {
+ return gerrit.getSpec();
+ }
+
+ public void setSpec(GerritSpec spec) {
+ gerrit.setSpec(spec);
+ deploy();
+ }
+
+ private static Config defaultConfig() {
+ Config cfg = new Config();
+ try {
+ cfg.fromText(DEFAULT_GERRIT_CONFIG);
+ } catch (ConfigInvalidException e) {
+ throw new IllegalStateException("Illegal default test configuration.", e);
+ }
+ return cfg;
+ }
+
+ public GerritTemplate createGerritTemplate() throws ConfigInvalidException {
+ return createGerritTemplate(name, mode, config);
+ }
+
+ public static GerritTemplate createGerritTemplate(String name, GerritMode mode)
+ throws ConfigInvalidException {
+ Config cfg = new Config();
+ cfg.fromText(DEFAULT_GERRIT_CONFIG);
+ return createGerritTemplate(name, mode, cfg);
+ }
+
+ public static GerritTemplate createGerritTemplate(String name, GerritMode mode, Config config) {
+ GerritTemplate template = new GerritTemplate();
+ ObjectMeta gerritMeta = new ObjectMetaBuilder().withName(name).build();
+ template.setMetadata(gerritMeta);
+ GerritTemplateSpec gerritSpec = template.getSpec();
+ if (gerritSpec == null) {
+ gerritSpec = new GerritTemplateSpec();
+ GerritSite site = new GerritSite();
+ site.setSize(new Quantity("1Gi"));
+ gerritSpec.setSite(site);
+ gerritSpec.setResources(
+ new ResourceRequirementsBuilder()
+ .withRequests(Map.of("cpu", new Quantity("1"), "memory", new Quantity("5Gi")))
+ .build());
+ }
+ gerritSpec.setMode(mode);
+ gerritSpec.setConfigFiles(Map.of("gerrit.config", config.toText()));
+ gerritSpec.setSecretRef(SECURE_CONFIG_SECRET_NAME);
+ template.setSpec(gerritSpec);
+ return template;
+ }
+
+ private void createGerritCR() {
+ ObjectMeta gerritMeta = new ObjectMetaBuilder().withName(name).withNamespace(namespace).build();
+ gerrit.setMetadata(gerritMeta);
+ GerritSpec gerritSpec = gerrit.getSpec();
+ if (gerritSpec == null) {
+ gerritSpec = new GerritSpec();
+ GerritSite site = new GerritSite();
+ site.setSize(new Quantity("1Gi"));
+ gerritSpec.setSite(site);
+ gerritSpec.setServerId("gerrit-1234");
+ gerritSpec.setResources(
+ new ResourceRequirementsBuilder()
+ .withRequests(Map.of("cpu", new Quantity("1"), "memory", new Quantity("5Gi")))
+ .build());
+ }
+ gerritSpec.setMode(mode);
+ gerritSpec.setConfigFiles(Map.of("gerrit.config", config.toText()));
+ gerritSpec.setSecretRef(SECURE_CONFIG_SECRET_NAME);
+
+ SharedStorage sharedStorage = new SharedStorage();
+ sharedStorage.setSize(Quantity.parse("1Gi"));
+
+ StorageClassConfig storageClassConfig = new StorageClassConfig();
+ storageClassConfig.setReadWriteMany(testProps.getRWMStorageClass());
+
+ GerritStorageConfig gerritStorageConfig = new GerritStorageConfig();
+ gerritStorageConfig.setSharedStorage(sharedStorage);
+ gerritStorageConfig.setStorageClasses(storageClassConfig);
+ gerritSpec.setStorage(gerritStorageConfig);
+
+ GerritRepositoryConfig repoConfig = new GerritRepositoryConfig();
+ repoConfig.setOrg(testProps.getRegistryOrg());
+ repoConfig.setRegistry(testProps.getRegistry());
+ repoConfig.setTag(testProps.getTag());
+
+ ContainerImageConfig containerImageConfig = new ContainerImageConfig();
+ containerImageConfig.setGerritImages(repoConfig);
+ Set<LocalObjectReference> imagePullSecrets = new HashSet<>();
+ imagePullSecrets.add(
+ new LocalObjectReference(AbstractGerritOperatorE2ETest.IMAGE_PULL_SECRET_NAME));
+ containerImageConfig.setImagePullSecrets(imagePullSecrets);
+ gerritSpec.setContainerImages(containerImageConfig);
+
+ IngressConfig ingressConfig = new IngressConfig();
+ ingressConfig.setHost(testProps.getIngressDomain());
+ ingressConfig.setTlsEnabled(false);
+ gerritSpec.setIngress(ingressConfig);
+
+ gerrit.setSpec(gerritSpec);
+ }
+
+ private void waitForGerritReadiness() {
+ logger.atInfo().log("Waiting max 1 minutes for the configmaps to be created.");
+ await()
+ .atMost(1, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertThat(
+ client
+ .configMaps()
+ .inNamespace(namespace)
+ .withName(GerritConfigMap.getName(gerrit))
+ .get(),
+ is(notNullValue()));
+ assertThat(
+ client
+ .configMaps()
+ .inNamespace(namespace)
+ .withName(GerritInitConfigMap.getName(gerrit))
+ .get(),
+ is(notNullValue()));
+ });
+
+ logger.atInfo().log("Waiting max 1 minutes for the Gerrit StatefulSet to be created.");
+ await()
+ .atMost(1, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertThat(
+ client
+ .apps()
+ .statefulSets()
+ .inNamespace(namespace)
+ .withName(gerrit.getMetadata().getName())
+ .get(),
+ is(notNullValue()));
+ });
+
+ logger.atInfo().log("Waiting max 1 minutes for the Gerrit Service to be created.");
+ await()
+ .atMost(1, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertThat(
+ client
+ .services()
+ .inNamespace(namespace)
+ .withName(GerritService.getName(gerrit))
+ .get(),
+ is(notNullValue()));
+ });
+
+ logger.atInfo().log("Waiting max 2 minutes for the Gerrit StatefulSet to be ready.");
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertTrue(
+ client
+ .apps()
+ .statefulSets()
+ .inNamespace(namespace)
+ .withName(gerrit.getMetadata().getName())
+ .isReady());
+ });
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestGerritCluster.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestGerritCluster.java
new file mode 100644
index 0000000..ce66ec2
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestGerritCluster.java
@@ -0,0 +1,256 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.test;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.api.GerritApi;
+import com.google.gerrit.k8s.operator.network.IngressType;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritCluster;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.cluster.GerritClusterSpec;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.gerrit.GerritTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.receiver.ReceiverTemplate;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.ContainerImageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritClusterIngressConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritIngressTlsConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritRepositoryConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.GerritStorageConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.NfsWorkaroundConfig;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.SharedStorage;
+import com.google.gerrit.k8s.operator.v1alpha.api.model.shared.StorageClassConfig;
+import com.urswolfer.gerrit.client.rest.GerritAuthData;
+import com.urswolfer.gerrit.client.rest.GerritRestApiFactory;
+import io.fabric8.kubernetes.api.model.LocalObjectReference;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.Quantity;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+
+public class TestGerritCluster {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ public static final String CLUSTER_NAME = "test-cluster";
+ public static final TestProperties testProps = new TestProperties();
+
+ private final KubernetesClient client;
+ private final String namespace;
+
+ private GerritClusterIngressConfig ingressConfig;
+ private boolean isNfsEnabled = false;
+ private GerritCluster cluster = new GerritCluster();
+ private String hostname;
+ private List<GerritTemplate> gerrits = new ArrayList<>();
+ private Optional<ReceiverTemplate> receiver = Optional.empty();
+
+ public TestGerritCluster(KubernetesClient client, String namespace) {
+ this.client = client;
+ this.namespace = namespace;
+
+ defaultIngressConfig();
+ }
+
+ public GerritCluster getGerritCluster() {
+ return cluster;
+ }
+
+ public String getHostname() {
+ return hostname;
+ }
+
+ public String getNamespace() {
+ return cluster.getMetadata().getNamespace();
+ }
+
+ public void setIngressType(IngressType type) {
+ switch (type) {
+ case INGRESS:
+ hostname = testProps.getIngressDomain();
+ enableIngress();
+ break;
+ case ISTIO:
+ hostname = testProps.getIstioDomain();
+ enableIngress();
+ break;
+ default:
+ hostname = null;
+ defaultIngressConfig();
+ }
+ deploy();
+ }
+
+ private void defaultIngressConfig() {
+ ingressConfig = new GerritClusterIngressConfig();
+ ingressConfig.setEnabled(false);
+ }
+
+ private void enableIngress() {
+ ingressConfig = new GerritClusterIngressConfig();
+ ingressConfig.setEnabled(true);
+ ingressConfig.setHost(hostname);
+ GerritIngressTlsConfig ingressTlsConfig = new GerritIngressTlsConfig();
+ ingressTlsConfig.setEnabled(true);
+ ingressTlsConfig.setSecret("tls-secret");
+ ingressConfig.setTls(ingressTlsConfig);
+ }
+
+ public GerritApi getGerritApiClient(GerritTemplate gerrit, IngressType ingressType) {
+ return new GerritRestApiFactory()
+ .create(new GerritAuthData.Basic(String.format("https://%s", hostname)));
+ }
+
+ public void setNfsEnabled(boolean isNfsEnabled) {
+ this.isNfsEnabled = isNfsEnabled;
+ deploy();
+ }
+
+ public void addGerrit(GerritTemplate gerrit) {
+ gerrits.add(gerrit);
+ }
+
+ public void removeGerrit(GerritTemplate gerrit) {
+ gerrits.remove(gerrit);
+ }
+
+ public void setReceiver(ReceiverTemplate receiver) {
+ this.receiver = Optional.ofNullable(receiver);
+ }
+
+ public GerritCluster build() {
+ cluster.setMetadata(
+ new ObjectMetaBuilder().withName(CLUSTER_NAME).withNamespace(namespace).build());
+
+ SharedStorage sharedStorage = new SharedStorage();
+ sharedStorage.setSize(Quantity.parse("1Gi"));
+
+ StorageClassConfig storageClassConfig = new StorageClassConfig();
+ storageClassConfig.setReadWriteMany(testProps.getRWMStorageClass());
+
+ NfsWorkaroundConfig nfsWorkaround = new NfsWorkaroundConfig();
+ nfsWorkaround.setEnabled(isNfsEnabled);
+ nfsWorkaround.setIdmapdConfig("[General]\nDomain = localdomain.com");
+ storageClassConfig.setNfsWorkaround(nfsWorkaround);
+
+ GerritClusterSpec clusterSpec = new GerritClusterSpec();
+ GerritStorageConfig gerritStorageConfig = new GerritStorageConfig();
+ gerritStorageConfig.setSharedStorage(sharedStorage);
+ gerritStorageConfig.setStorageClasses(storageClassConfig);
+ clusterSpec.setStorage(gerritStorageConfig);
+
+ GerritRepositoryConfig repoConfig = new GerritRepositoryConfig();
+ repoConfig.setOrg(testProps.getRegistryOrg());
+ repoConfig.setRegistry(testProps.getRegistry());
+ repoConfig.setTag(testProps.getTag());
+
+ ContainerImageConfig containerImageConfig = new ContainerImageConfig();
+ containerImageConfig.setGerritImages(repoConfig);
+ Set<LocalObjectReference> imagePullSecrets = new HashSet<>();
+ imagePullSecrets.add(
+ new LocalObjectReference(AbstractGerritOperatorE2ETest.IMAGE_PULL_SECRET_NAME));
+ containerImageConfig.setImagePullSecrets(imagePullSecrets);
+ clusterSpec.setContainerImages(containerImageConfig);
+
+ clusterSpec.setIngress(ingressConfig);
+
+ clusterSpec.setGerrits(gerrits);
+ if (receiver.isPresent()) {
+ clusterSpec.setReceiver(receiver.get());
+ }
+
+ cluster.setSpec(clusterSpec);
+ return cluster;
+ }
+
+ public void deploy() {
+ build();
+ client.resource(cluster).inNamespace(namespace).createOrReplace();
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertThat(
+ client
+ .resources(GerritCluster.class)
+ .inNamespace(namespace)
+ .withName(CLUSTER_NAME)
+ .get(),
+ is(notNullValue()));
+ });
+
+ GerritCluster updatedCluster =
+ client.resources(GerritCluster.class).inNamespace(namespace).withName(CLUSTER_NAME).get();
+ for (GerritTemplate gerrit : updatedCluster.getSpec().getGerrits()) {
+ waitForGerritReadiness(gerrit);
+ }
+ if (receiver.isPresent()) {
+ waitForReceiverReadiness();
+ }
+ }
+
+ private void waitForGerritReadiness(GerritTemplate gerrit) {
+ logger.atInfo().log("Waiting max 2 minutes for the Gerrit StatefulSet to be ready.");
+ await()
+ .pollDelay(15, SECONDS)
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertThat(
+ client
+ .apps()
+ .statefulSets()
+ .inNamespace(namespace)
+ .withName(gerrit.getMetadata().getName())
+ .get(),
+ is(notNullValue()));
+ assertTrue(
+ client
+ .apps()
+ .statefulSets()
+ .inNamespace(namespace)
+ .withName(gerrit.getMetadata().getName())
+ .isReady());
+ assertTrue(
+ client
+ .pods()
+ .inNamespace(namespace)
+ .withName(gerrit.getMetadata().getName() + "-0")
+ .isReady());
+ });
+ }
+
+ private void waitForReceiverReadiness() {
+ await()
+ .atMost(2, MINUTES)
+ .untilAsserted(
+ () -> {
+ assertTrue(
+ client
+ .apps()
+ .deployments()
+ .inNamespace(namespace)
+ .withName(receiver.get().getMetadata().getName())
+ .isReady());
+ });
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestProperties.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestProperties.java
new file mode 100644
index 0000000..d7d23bf
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestProperties.java
@@ -0,0 +1,78 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.test;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Properties;
+
+public class TestProperties {
+ private final Properties props = getProperties();
+
+ private static Properties getProperties() {
+ String propertiesPath = System.getProperty("properties", "test.properties");
+ Properties props = new Properties();
+ try {
+ props.load(new FileInputStream(propertiesPath));
+ } catch (IOException e) {
+ throw new IllegalStateException("Could not load properties file.", e);
+ }
+ return props;
+ }
+
+ public String getRWMStorageClass() {
+ return props.getProperty("rwmStorageClass", "nfs-client");
+ }
+
+ public String getRegistry() {
+ return props.getProperty("registry", "");
+ }
+
+ public String getRegistryOrg() {
+ return props.getProperty("registryOrg", "k8sgerrit");
+ }
+
+ public String getRegistryUser() {
+ return props.getProperty("registryUser", "");
+ }
+
+ public String getRegistryPwd() {
+ return props.getProperty("registryPwd", "");
+ }
+
+ public String getTag() {
+ return props.getProperty("tag", "");
+ }
+
+ public String getIngressDomain() {
+ return props.getProperty("ingressDomain", "");
+ }
+
+ public String getIstioDomain() {
+ return props.getProperty("istioDomain", "");
+ }
+
+ public String getLdapAdminPwd() {
+ return props.getProperty("ldapAdminPwd", "");
+ }
+
+ public String getGerritUser() {
+ return props.getProperty("gerritUser", "");
+ }
+
+ public String getGerritPwd() {
+ return props.getProperty("gerritPwd", "");
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestSecureConfig.java b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestSecureConfig.java
new file mode 100644
index 0000000..04688d7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/java/com/google/gerrit/k8s/operator/test/TestSecureConfig.java
@@ -0,0 +1,58 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gerrit.k8s.operator.test;
+
+import io.fabric8.kubernetes.api.model.Secret;
+import io.fabric8.kubernetes.api.model.SecretBuilder;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import java.util.Base64;
+import java.util.Map;
+import org.eclipse.jgit.lib.Config;
+
+public class TestSecureConfig {
+ public static final String SECURE_CONFIG_SECRET_NAME = "gerrit-secret";
+
+ private final KubernetesClient client;
+ private final String namespace;
+
+ private Config secureConfig = new Config();
+ private Secret secureConfigSecret;
+
+ public TestSecureConfig(KubernetesClient client, TestProperties testProps, String namespace) {
+ this.client = client;
+ this.namespace = namespace;
+ this.secureConfig.setString("ldap", null, "password", testProps.getLdapAdminPwd());
+ }
+
+ public void createOrReplace() {
+ secureConfigSecret =
+ new SecretBuilder()
+ .withNewMetadata()
+ .withNamespace(namespace)
+ .withName(SECURE_CONFIG_SECRET_NAME)
+ .endMetadata()
+ .withData(
+ Map.of(
+ "secure.config",
+ Base64.getEncoder().encodeToString(secureConfig.toText().getBytes())))
+ .build();
+ client.resource(secureConfigSecret).inNamespace(namespace).createOrReplace();
+ }
+
+ public void modify(String section, String key, String value) {
+ secureConfig.setString(section, null, key, value);
+ createOrReplace();
+ }
+}
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/host.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/host.yaml
new file mode 100644
index 0000000..59ff0a4
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/host.yaml
@@ -0,0 +1,9 @@
+apiVersion: getambassador.io/v2
+kind: Host
+metadata:
+ name: gerrit-ambassador-host
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ hostname: example.com
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/host_with_tls.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/host_with_tls.yaml
new file mode 100644
index 0000000..c386b3a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/host_with_tls.yaml
@@ -0,0 +1,13 @@
+apiVersion: getambassador.io/v2
+kind: Host
+metadata:
+ name: gerrit-ambassador-host
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ hostname: example.com
+ tlsContext:
+ name: gerrit-tls-context
+ tlsSecret:
+ name: tls-secret
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingGETReplica_primary_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingGETReplica_primary_replica.yaml
new file mode 100644
index 0000000..841d1fb
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingGETReplica_primary_replica.yaml
@@ -0,0 +1,18 @@
+apiVersion: getambassador.io/v2
+kind: Mapping
+metadata:
+ name: gerrit-mapping-get-replica
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ bypass_auth: true
+ rewrite: ""
+ host: example.com
+ method: GET
+ prefix: /.*/info/refs
+ prefix_regex: true
+ query_parameters:
+ service: git-upload-pack
+ service: replica:48080
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingGET_receiver.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingGET_receiver.yaml
new file mode 100644
index 0000000..b179763
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingGET_receiver.yaml
@@ -0,0 +1,18 @@
+apiVersion: getambassador.io/v2
+kind: Mapping
+metadata:
+ name: gerrit-mapping-receiver-get
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ bypass_auth: true
+ rewrite: ""
+ host: example.com
+ method: GET
+ prefix: /.*/info/refs
+ prefix_regex: true
+ query_parameters:
+ service: git-receive-pack
+ service: receiver:48081
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingPOSTReplica_primary_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingPOSTReplica_primary_replica.yaml
new file mode 100644
index 0000000..02f717a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingPOSTReplica_primary_replica.yaml
@@ -0,0 +1,16 @@
+apiVersion: getambassador.io/v2
+kind: Mapping
+metadata:
+ name: gerrit-mapping-post-replica
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ bypass_auth: true
+ rewrite: ""
+ host: example.com
+ method: POST
+ prefix: /.*/git-upload-pack
+ prefix_regex: true
+ service: replica:48080
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingPrimary_primary_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingPrimary_primary_replica.yaml
new file mode 100644
index 0000000..e6f52a5
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mappingPrimary_primary_replica.yaml
@@ -0,0 +1,14 @@
+apiVersion: getambassador.io/v2
+kind: Mapping
+metadata:
+ name: gerrit-mapping-primary
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ bypass_auth: true
+ rewrite: ""
+ host: example.com
+ prefix: /
+ service: primary:48080
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_primary.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_primary.yaml
new file mode 100644
index 0000000..4ea6c95
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_primary.yaml
@@ -0,0 +1,14 @@
+apiVersion: getambassador.io/v2
+kind: Mapping
+metadata:
+ name: gerrit-mapping
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ bypass_auth: true
+ rewrite: ""
+ host: example.com
+ prefix: /
+ service: primary:48080
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_receiver.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_receiver.yaml
new file mode 100644
index 0000000..93b9eed
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_receiver.yaml
@@ -0,0 +1,15 @@
+apiVersion: getambassador.io/v2
+kind: Mapping
+metadata:
+ name: gerrit-mapping-receiver
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ bypass_auth: true
+ rewrite: ""
+ host: example.com
+ prefix: /a/projects/.*|/.*/git-receive-pack
+ prefix_regex: true
+ service: receiver:48081
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_replica.yaml
new file mode 100644
index 0000000..7fb2b7b
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/mapping_replica.yaml
@@ -0,0 +1,14 @@
+apiVersion: getambassador.io/v2
+kind: Mapping
+metadata:
+ name: gerrit-mapping
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ bypass_auth: true
+ rewrite: ""
+ host: example.com
+ prefix: /
+ service: replica:48080
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/tlscontext.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/tlscontext.yaml
new file mode 100644
index 0000000..aee8ddb
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ambassador/dependent/tlscontext.yaml
@@ -0,0 +1,13 @@
+apiVersion: getambassador.io/v2
+kind: TLSContext
+metadata:
+ name: gerrit-tls-context
+ namespace: gerrit
+spec:
+ ambassador_id:
+ - my-id-1
+ - my-id-2
+ secret: tls-secret
+ secret_namespacing: true
+ hosts:
+ - example.com
\ No newline at end of file
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary.yaml
new file mode 100644
index 0000000..68b81f5
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary.yaml
@@ -0,0 +1,17 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ primaryGerrit:
+ name: primary
+ httpPort: 48080
+ sshPort: 49418
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica.yaml
new file mode 100644
index 0000000..443e465
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica.yaml
@@ -0,0 +1,21 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ primaryGerrit:
+ name: primary
+ httpPort: 48080
+ sshPort: 49418
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49418
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_create_host.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_create_host.yaml
new file mode 100644
index 0000000..ef4f76c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_create_host.yaml
@@ -0,0 +1,22 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ createHost: true
+ primaryGerrit:
+ name: primary
+ httpPort: 48080
+ sshPort: 49418
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49418
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_ssh.yaml
new file mode 100644
index 0000000..623890f
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_ssh.yaml
@@ -0,0 +1,21 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ssh:
+ enabled: true
+ primaryGerrit:
+ name: primary
+ httpPort: 48080
+ sshPort: 49418
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49419
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_tls.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_tls.yaml
new file mode 100644
index 0000000..21b9e59
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_tls.yaml
@@ -0,0 +1,22 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: true
+ secret: tls-secret
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ primaryGerrit:
+ name: primary
+ httpPort: 48080
+ sshPort: 49418
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49418
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_tls_create_host.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_tls_create_host.yaml
new file mode 100644
index 0000000..c84e3cc
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_replica_tls_create_host.yaml
@@ -0,0 +1,23 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: true
+ secret: tls-secret
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ createHost: true
+ primaryGerrit:
+ name: primary
+ httpPort: 48080
+ sshPort: 49418
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49418
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_ssh.yaml
new file mode 100644
index 0000000..a6b3271
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_primary_ssh.yaml
@@ -0,0 +1,17 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ssh:
+ enabled: true
+ primaryGerrit:
+ name: primary
+ httpPort: 48080
+ sshPort: 49418
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica.yaml
new file mode 100644
index 0000000..e6da865
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica.yaml
@@ -0,0 +1,20 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49418
+ receiver:
+ name: receiver
+ httpPort: 48081
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica_ssh.yaml
new file mode 100644
index 0000000..7efc649
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica_ssh.yaml
@@ -0,0 +1,20 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ssh:
+ enabled: true
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49419
+ receiver:
+ name: receiver
+ httpPort: 48081
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica_tls.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica_tls.yaml
new file mode 100644
index 0000000..d5bd3c0
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_receiver_replica_tls.yaml
@@ -0,0 +1,21 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: true
+ secret: tls-secret
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49418
+ receiver:
+ name: receiver
+ httpPort: 48081
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_replica.yaml
new file mode 100644
index 0000000..915da97
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_replica.yaml
@@ -0,0 +1,17 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ambassador:
+ id: ["my-id-1", "my-id-2"]
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49418
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_replica_ssh.yaml
new file mode 100644
index 0000000..efa54a7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/gerritnetwork_replica_ssh.yaml
@@ -0,0 +1,17 @@
+apiVersion: "gerritoperator.google.com/v1alpha1"
+kind: GerritNetwork
+metadata:
+ name: gerrit
+ namespace: gerrit
+spec:
+ ingress:
+ enabled: true
+ host: example.com
+ tls:
+ enabled: false
+ ssh:
+ enabled: true
+ gerritReplica:
+ name: replica
+ httpPort: 48080
+ sshPort: 49419
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary.yaml
new file mode 100644
index 0000000..9b18d5c
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary.yaml
@@ -0,0 +1,25 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: gerrit-ingress
+ namespace: gerrit
+ annotations:
+ nginx.ingress.kubernetes.io/use-regex: true
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: Gerrit_Session
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/session-cookie-max-age: 60
+ nginx.ingress.kubernetes.io/session-cookie-expires: 60
+spec:
+ rules:
+ - host: example.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: "/"
+ backend:
+ service:
+ name: primary
+ port:
+ name: http
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary_replica.yaml
new file mode 100644
index 0000000..d6aae78
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary_replica.yaml
@@ -0,0 +1,38 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: gerrit-ingress
+ namespace: gerrit
+ annotations:
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/configuration-snippet: |-
+ if ($args ~ service=git-upload-pack){
+ set $proxy_upstream_name "gerrit-replica-http";
+ set $proxy_host $proxy_upstream_name;
+ set $service_name "replica";
+ }
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: Gerrit_Session
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/session-cookie-max-age: "60"
+ nginx.ingress.kubernetes.io/session-cookie-expires: "60"
+spec:
+ rules:
+ - host: example.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: "/.*/git-upload-pack"
+ backend:
+ service:
+ name: replica
+ port:
+ name: http
+ - pathType: Prefix
+ path: "/"
+ backend:
+ service:
+ name: primary
+ port:
+ name: http
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary_replica_tls.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary_replica_tls.yaml
new file mode 100644
index 0000000..adbe808
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_primary_replica_tls.yaml
@@ -0,0 +1,42 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: gerrit-ingress
+ namespace: gerrit
+ annotations:
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/configuration-snippet: |-
+ if ($args ~ service=git-upload-pack){
+ set $proxy_upstream_name "gerrit-replica-http";
+ set $proxy_host $proxy_upstream_name;
+ set $service_name "replica";
+ }
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: Gerrit_Session
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/session-cookie-max-age: "60"
+ nginx.ingress.kubernetes.io/session-cookie-expires: "60"
+spec:
+ tls:
+ - hosts:
+ - example.com
+ secretName: tls-secret
+ rules:
+ - host: example.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: "/.*/git-upload-pack"
+ backend:
+ service:
+ name: replica
+ port:
+ name: http
+ - pathType: Prefix
+ path: "/"
+ backend:
+ service:
+ name: primary
+ port:
+ name: http
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_receiver_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_receiver_replica.yaml
new file mode 100644
index 0000000..75a37c7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_receiver_replica.yaml
@@ -0,0 +1,45 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: gerrit-ingress
+ namespace: gerrit
+ annotations:
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: Gerrit_Session
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/session-cookie-max-age: "60"
+ nginx.ingress.kubernetes.io/session-cookie-expires: "60"
+ nginx.ingress.kubernetes.io/configuration-snippet: |-
+ if ($args ~ service=git-receive-pack){
+ set $proxy_upstream_name "gerrit-receiver-http";
+ set $proxy_host $proxy_upstream_name;
+ set $service_name "receiver";
+ }
+spec:
+ rules:
+ - host: example.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: "/a/projects"
+ backend:
+ service:
+ name: receiver
+ port:
+ name: http
+ - pathType: Prefix
+ path: "/.*/git-receive-pack"
+ backend:
+ service:
+ name: receiver
+ port:
+ name: http
+ - pathType: Prefix
+ path: "/"
+ backend:
+ service:
+ name: replica
+ port:
+ name: http
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_receiver_replica_tls.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_receiver_replica_tls.yaml
new file mode 100644
index 0000000..ad4cfbf
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_receiver_replica_tls.yaml
@@ -0,0 +1,49 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: gerrit-ingress
+ namespace: gerrit
+ annotations:
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: Gerrit_Session
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/session-cookie-max-age: "60"
+ nginx.ingress.kubernetes.io/session-cookie-expires: "60"
+ nginx.ingress.kubernetes.io/configuration-snippet: |-
+ if ($args ~ service=git-receive-pack){
+ set $proxy_upstream_name "gerrit-receiver-http";
+ set $proxy_host $proxy_upstream_name;
+ set $service_name "receiver";
+ }
+spec:
+ tls:
+ - hosts:
+ - example.com
+ secretName: tls-secret
+ rules:
+ - host: example.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: "/a/projects"
+ backend:
+ service:
+ name: receiver
+ port:
+ name: http
+ - pathType: Prefix
+ path: "/.*/git-receive-pack"
+ backend:
+ service:
+ name: receiver
+ port:
+ name: http
+ - pathType: Prefix
+ path: "/"
+ backend:
+ service:
+ name: replica
+ port:
+ name: http
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_replica.yaml
new file mode 100644
index 0000000..07dfe7d
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/ingress/dependent/ingress_replica.yaml
@@ -0,0 +1,25 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: gerrit-ingress
+ namespace: gerrit
+ annotations:
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: Gerrit_Session
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/session-cookie-max-age: "60"
+ nginx.ingress.kubernetes.io/session-cookie-expires: "60"
+spec:
+ rules:
+ - host: example.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: "/"
+ backend:
+ service:
+ name: replica
+ port:
+ name: http
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway.yaml
new file mode 100644
index 0000000..ccbab63
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway.yaml
@@ -0,0 +1,17 @@
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+ name: gerrit-istio-gateway
+ namespace: gerrit
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - example.com
+ tls:
+ httpsRedirect: false
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_primary_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_primary_replica_ssh.yaml
new file mode 100644
index 0000000..70a38f2
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_primary_replica_ssh.yaml
@@ -0,0 +1,29 @@
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+ name: gerrit-istio-gateway
+ namespace: gerrit
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - example.com
+ tls:
+ httpsRedirect: false
+ - port:
+ number: 49418
+ name: ssh-primary
+ protocol: TCP
+ hosts:
+ - example.com
+ - port:
+ number: 49419
+ name: ssh-replica
+ protocol: TCP
+ hosts:
+ - example.com
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_primary_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_primary_ssh.yaml
new file mode 100644
index 0000000..7815aba
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_primary_ssh.yaml
@@ -0,0 +1,23 @@
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+ name: gerrit-istio-gateway
+ namespace: gerrit
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - example.com
+ tls:
+ httpsRedirect: false
+ - port:
+ number: 49418
+ name: ssh-primary
+ protocol: TCP
+ hosts:
+ - example.com
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_receiver_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_receiver_replica_ssh.yaml
new file mode 100644
index 0000000..04212e3
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_receiver_replica_ssh.yaml
@@ -0,0 +1,23 @@
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+ name: gerrit-istio-gateway
+ namespace: gerrit
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - example.com
+ tls:
+ httpsRedirect: false
+ - port:
+ number: 49419
+ name: ssh-replica
+ protocol: TCP
+ hosts:
+ - example.com
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_replica_ssh.yaml
new file mode 100644
index 0000000..04212e3
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_replica_ssh.yaml
@@ -0,0 +1,23 @@
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+ name: gerrit-istio-gateway
+ namespace: gerrit
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - example.com
+ tls:
+ httpsRedirect: false
+ - port:
+ number: 49419
+ name: ssh-replica
+ protocol: TCP
+ hosts:
+ - example.com
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_tls.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_tls.yaml
new file mode 100644
index 0000000..9f64ad5
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/gateway_tls.yaml
@@ -0,0 +1,26 @@
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+ name: gerrit-istio-gateway
+ namespace: gerrit
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - example.com
+ tls:
+ httpsRedirect: true
+ - port:
+ number: 443
+ name: https
+ protocol: HTTPS
+ hosts:
+ - example.com
+ tls:
+ mode: SIMPLE
+ credentialName: tls-secret
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary.yaml
new file mode 100644
index 0000000..ab1bf7a
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary.yaml
@@ -0,0 +1,17 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: gerrit-primary-primary
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: primary.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_replica.yaml
new file mode 100644
index 0000000..f97524d
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_replica.yaml
@@ -0,0 +1,37 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: gerrit-replica-replica
+ match:
+ - uri:
+ regex: "^/(.*)/info/refs$"
+ queryParams:
+ service:
+ exact: git-upload-pack
+ ignoreUriCase: true
+ method:
+ exact: GET
+ - uri:
+ regex: "^/(.*)/git-upload-pack$"
+ ignoreUriCase: true
+ method:
+ exact: POST
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: replica.gerrit.svc.cluster.local
+ - name: gerrit-primary-primary
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: primary.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_replica_ssh.yaml
new file mode 100644
index 0000000..30ffb44
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_replica_ssh.yaml
@@ -0,0 +1,52 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: gerrit-replica-replica
+ match:
+ - uri:
+ regex: "^/(.*)/info/refs$"
+ queryParams:
+ service:
+ exact: git-upload-pack
+ ignoreUriCase: true
+ method:
+ exact: GET
+ - uri:
+ regex: "^/(.*)/git-upload-pack$"
+ ignoreUriCase: true
+ method:
+ exact: POST
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: replica.gerrit.svc.cluster.local
+ - name: gerrit-primary-primary
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: primary.gerrit.svc.cluster.local
+ tcp:
+ - match:
+ - port: 49418
+ route:
+ - destination:
+ port:
+ number: 49418
+ host: primary.gerrit.svc.cluster.local
+ - match:
+ - port: 49419
+ route:
+ - destination:
+ port:
+ number: 49419
+ host: replica.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_ssh.yaml
new file mode 100644
index 0000000..73f5bd7
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_primary_ssh.yaml
@@ -0,0 +1,25 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: gerrit-primary-primary
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: primary.gerrit.svc.cluster.local
+ tcp:
+ - match:
+ - port: 49418
+ route:
+ - destination:
+ port:
+ number: 49418
+ host: primary.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_receiver_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_receiver_replica.yaml
new file mode 100644
index 0000000..3a224d6
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_receiver_replica.yaml
@@ -0,0 +1,33 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: receiver-receiver
+ match:
+ - uri:
+ prefix: "/a/projects/"
+ - uri:
+ regex: "^/(.*)/git-receive-pack$"
+ - uri:
+ regex: "^/(.*)/info/refs$"
+ queryParams:
+ service:
+ exact: git-receive-pack
+ route:
+ - destination:
+ port:
+ number: 48081
+ host: receiver.gerrit.svc.cluster.local
+ - name: gerrit-replica-replica
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: replica.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_receiver_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_receiver_replica_ssh.yaml
new file mode 100644
index 0000000..422e2e9
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_receiver_replica_ssh.yaml
@@ -0,0 +1,41 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: receiver-receiver
+ match:
+ - uri:
+ prefix: "/a/projects/"
+ - uri:
+ regex: "^/(.*)/git-receive-pack$"
+ - uri:
+ regex: "^/(.*)/info/refs$"
+ queryParams:
+ service:
+ exact: git-receive-pack
+ route:
+ - destination:
+ port:
+ number: 48081
+ host: receiver.gerrit.svc.cluster.local
+ - name: gerrit-replica-replica
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: replica.gerrit.svc.cluster.local
+ tcp:
+ - match:
+ - port: 49419
+ route:
+ - destination:
+ port:
+ number: 49419
+ host: replica.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_replica.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_replica.yaml
new file mode 100644
index 0000000..d077def
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_replica.yaml
@@ -0,0 +1,17 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: gerrit-replica-replica
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: replica.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_replica_ssh.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_replica_ssh.yaml
new file mode 100644
index 0000000..d9e7fd3
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/network/istio/dependent/virtualservice_replica_ssh.yaml
@@ -0,0 +1,25 @@
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+ name: gerrit-gerrit-http-virtual-service
+ namespace: gerrit
+spec:
+ hosts:
+ - example.com
+ gateways:
+ - gerrit/gerrit-istio-gateway
+ http:
+ - name: gerrit-replica-replica
+ route:
+ - destination:
+ port:
+ number: 48080
+ host: replica.gerrit.svc.cluster.local
+ tcp:
+ - match:
+ - port: 49419
+ route:
+ - destination:
+ port:
+ number: 49419
+ host: replica.gerrit.svc.cluster.local
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/deployment.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/deployment.yaml
new file mode 100644
index 0000000..4857152
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/deployment.yaml
@@ -0,0 +1,118 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: receiver
+ namespace: gerrit
+ labels:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/created-by: ReceiverReconciler
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/version: unknown
+ app.kubernetes.io/component: receiver-deployment-receiver
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/component: receiver-deployment-receiver
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/created-by: ReceiverReconciler
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/version: unknown
+ app.kubernetes.io/component: receiver-deployment-receiver
+ spec:
+ tolerations:
+ - key: key1
+ operator: Equal
+ value: value1
+ effect: NoSchedule
+ topologySpreadConstraints:
+ - maxSkew: 1
+ topologyKey: zone
+ whenUnsatisfiable: DoNotSchedule
+ labelSelector:
+ matchLabels:
+ foo: bar
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: disktype
+ operator: In
+ values:
+ - ssd
+ priorityClassName: prio
+ securityContext:
+ fsGroup: 100
+ imagePullSecrets: []
+ initContainers: []
+ containers:
+ - name: apache-git-http-backend
+ imagePullPolicy: Always
+ image: docker.io/k8sgerrit/apache-git-http-backend:latest
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ ports:
+ - name: http
+ containerPort: 80
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ volumeMounts:
+ - name: shared
+ subPathExpr: "logs/$(POD_NAME)"
+ mountPath: /var/log/apache2
+ - name: apache-credentials
+ mountPath: /var/apache/credentials/.htpasswd
+ subPath: .htpasswd
+ - name: shared
+ subPath: git
+ mountPath: /var/gerrit/git
+ volumes:
+ - name: shared
+ persistentVolumeClaim:
+ claimName: shared-pvc
+ - name: apache-credentials
+ secret:
+ secretName: apache-credentials
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/deployment_minimal.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/deployment_minimal.yaml
new file mode 100644
index 0000000..92d0752
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/deployment_minimal.yaml
@@ -0,0 +1,79 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: receiver
+ namespace: gerrit
+ labels:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/created-by: ReceiverReconciler
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/version: unknown
+ app.kubernetes.io/component: receiver-deployment-receiver
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/component: receiver-deployment-receiver
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/created-by: ReceiverReconciler
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/version: unknown
+ app.kubernetes.io/component: receiver-deployment-receiver
+ spec:
+ securityContext:
+ fsGroup: 100
+ imagePullSecrets: []
+ initContainers: []
+ containers:
+ - name: apache-git-http-backend
+ imagePullPolicy: Always
+ image: docker.io/k8sgerrit/apache-git-http-backend:latest
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ ports:
+ - name: http
+ containerPort: 80
+
+ readinessProbe:
+ tcpSocket:
+ port: 80
+
+ livenessProbe:
+ tcpSocket:
+ port: 80
+
+ volumeMounts:
+ - name: shared
+ subPathExpr: "logs/$(POD_NAME)"
+ mountPath: /var/log/apache2
+ - name: apache-credentials
+ mountPath: /var/apache/credentials/.htpasswd
+ subPath: .htpasswd
+ - name: shared
+ subPath: git
+ mountPath: /var/gerrit/git
+ volumes:
+ - name: shared
+ persistentVolumeClaim:
+ claimName: shared-pvc
+ - name: apache-credentials
+ secret:
+ secretName: apache-credentials
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/service.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/service.yaml
new file mode 100644
index 0000000..907ee1e
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/dependent/service.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: receiver
+ namespace: gerrit
+ labels:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/created-by: ReceiverReconciler
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/version: unknown
+ app.kubernetes.io/component: receiver-service
+spec:
+ type: NodePort
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ app.kubernetes.io/managed-by: gerrit-operator
+ app.kubernetes.io/name: gerrit
+ app.kubernetes.io/part-of: receiver
+ app.kubernetes.io/instance: receiver
+ app.kubernetes.io/component: receiver-deployment-receiver
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/receiver.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/receiver.yaml
new file mode 100644
index 0000000..3364ba0
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/receiver.yaml
@@ -0,0 +1,105 @@
+apiVersion: "gerritoperator.google.com/v1alpha6"
+kind: Receiver
+metadata:
+ name: receiver
+ namespace: gerrit
+spec:
+ tolerations:
+ - key: key1
+ operator: Equal
+ value: value1
+ effect: NoSchedule
+
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: disktype
+ operator: In
+ values:
+ - ssd
+
+ topologySpreadConstraints:
+ - maxSkew: 1
+ topologyKey: zone
+ whenUnsatisfiable: DoNotSchedule
+ labelSelector:
+ matchLabels:
+ foo: bar
+
+ priorityClassName: "prio"
+
+ replicas: 1
+ maxSurge: 1
+ maxUnavailable: 1
+
+ resources:
+ requests:
+ cpu: 1
+ memory: 5Gi
+ limits:
+ cpu: 1
+ memory: 6Gi
+
+ readinessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ livenessProbe:
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+ service:
+ type: NodePort
+ httpPort: 80
+
+ credentialSecretRef: apache-credentials
+
+ containerImages:
+ imagePullSecrets: []
+ imagePullPolicy: Always
+ gerritImages:
+ registry: docker.io
+ org: k8sgerrit
+ tag: latest
+ busyBox:
+ registry: docker.io
+ tag: latest
+
+ storage:
+ storageClasses:
+ readWriteOnce: default
+ readWriteMany: shared-storage
+ nfsWorkaround:
+ enabled: false
+ chownOnStartup: false
+ idmapdConfig: |-
+ [General]
+ Verbosity = 0
+ Domain = localdomain.com
+
+ [Mapping]
+ Nobody-User = nobody
+ Nobody-Group = nogroup
+
+ sharedStorage:
+ externalPVC:
+ enabled: false
+ claimName: ""
+ size: 1Gi
+ volumeName: ""
+ selector:
+ matchLabels:
+ volume-type: ssd
+ aws-availability-zone: us-east-1
+
+ ingress:
+ host: example.com
+ tlsEnabled: false
diff --git a/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/receiver_minimal.yaml b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/receiver_minimal.yaml
new file mode 100644
index 0000000..8fdf155
--- /dev/null
+++ b/charts/k8s-gerrit/operator/src/test/resources/com/google/gerrit/k8s/operator/receiver/receiver_minimal.yaml
@@ -0,0 +1,15 @@
+apiVersion: "gerritoperator.google.com/v1alpha6"
+kind: Receiver
+metadata:
+ name: receiver
+ namespace: gerrit
+spec:
+ credentialSecretRef: apache-credentials
+
+ storage:
+ storageClasses:
+ readWriteOnce: default
+ readWriteMany: shared-storage
+
+ sharedStorage:
+ size: 1Gi
diff --git a/charts/k8s-gerrit/operator/test.properties b/charts/k8s-gerrit/operator/test.properties
new file mode 100644
index 0000000..b6d5cdc
--- /dev/null
+++ b/charts/k8s-gerrit/operator/test.properties
@@ -0,0 +1,18 @@
+# Storage
+rwmStorageClass=
+
+# Container registry
+registry=
+registryOrg=
+tag=
+registryUser=
+registryPwd=
+
+# Ingress
+ingressDomain=
+istioDomain=
+
+# LDAP
+ldapAdminPwd=
+gerritUser=
+gerritPwd=
diff --git a/charts/k8s-gerrit/publish b/charts/k8s-gerrit/publish
new file mode 100755
index 0000000..ba8d368
--- /dev/null
+++ b/charts/k8s-gerrit/publish
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+usage() {
+ me=`basename "$0"`
+ echo >&2 "Usage: $me [--help] [--update-latest] [--registry REGISTRY] [--org ORGANIZATION] [--no-push] [--tag TAG] [IMAGE]"
+ exit 1
+}
+
+UPDATE_LATEST=false
+ORGANIZATION=k8sgerrit
+PUSH_IMAGES=true
+
+while test $# -gt 0 ; do
+ case "$1" in
+ --help)
+ usage
+ ;;
+ --update-latest)
+ UPDATE_LATEST=true
+ shift
+ ;;
+ --registry)
+ shift
+ REGISTRY=$1
+ shift
+ ;;
+ --org)
+ shift
+ ORGANIZATION=$1
+ shift
+ ;;
+ --no-push)
+ PUSH_IMAGES=false
+ shift
+ ;;
+ --tag)
+ shift
+ TAG=$1
+ shift
+ ;;
+ *)
+ break
+ esac
+done
+
+if test -z "$TAG"; then
+ TAG=$(./get_version.sh)
+fi
+
+#Get list of images
+source container-images/publish_list
+IMAGES=$(get_image_list)
+
+test -n "$REGISTRY" && [[ "$REGISTRY" != */ ]] && REGISTRY="$REGISTRY/"
+
+publish_image(){
+ IMAGE=$1
+ if test "$UPDATE_LATEST" = "true" ; then
+ docker image tag k8sgerrit/$IMAGE:$TAG ${REGISTRY}${ORGANIZATION}/$IMAGE:latest
+ if test "$PUSH_IMAGES" = "true" ; then
+ docker push "${REGISTRY}${ORGANIZATION}/$IMAGE:latest"
+ fi
+ fi
+
+ docker image tag k8sgerrit/$IMAGE:$TAG ${REGISTRY}${ORGANIZATION}/$IMAGE:$TAG
+ if test "$PUSH_IMAGES" = "true" ; then
+ docker push "${REGISTRY}${ORGANIZATION}/$IMAGE:$TAG"
+ fi
+}
+
+if test $# -eq 0 ; then
+ for IMAGE in $IMAGES; do
+ publish_image $IMAGE
+ done
+else
+ while test $# -gt 0 ; do
+ publish_image $1
+ shift
+ done
+fi
diff --git a/charts/k8s-gerrit/setup.cfg b/charts/k8s-gerrit/setup.cfg
new file mode 100644
index 0000000..0c90095
--- /dev/null
+++ b/charts/k8s-gerrit/setup.cfg
@@ -0,0 +1,10 @@
+[tool:pytest]
+norecursedirs = tests/helpers
+
+markers =
+ docker: Tests that require to run and interact with a docker container
+ incremental: Test classes containing tests that need to run incrementally
+ integration: Integration tests
+ kubernetes: Tests that require a Kubernetes cluster
+ slow: Tests that run slower than the average test
+ structure: Structure tests
diff --git a/charts/k8s-gerrit/supplements/gerrit-master.minikube.values.yaml b/charts/k8s-gerrit/supplements/gerrit-master.minikube.values.yaml
new file mode 100644
index 0000000..cb4e7f2
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/gerrit-master.minikube.values.yaml
@@ -0,0 +1,85 @@
+storageClasses:
+ default:
+ name: standard
+ shared:
+ name: shared-storage
+
+gitGC:
+ schedule: "*/15 * * * *"
+
+ resources:
+ requests:
+ cpu: 50m
+ memory: 100Mi
+ limits:
+ cpu: 50m
+ memory: 100Mi
+
+ logging:
+ persistence:
+ enabled: false
+
+gerrit:
+
+ resources:
+ requests:
+ cpu: 200m
+ memory: 400Mi
+ limits:
+ cpu: 500m
+ memory: 400Mi
+
+ persistence:
+ enabled: false
+
+ livenessProbe:
+ initialDelaySeconds: 90
+ periodSeconds: 5
+
+ ingress:
+ host: primary.gerrit
+
+ config:
+ gerrit: |-
+ [gerrit]
+ basePath = git
+ serverId = gerrit-1
+ canonicalWebUrl = http://primary.gerrit
+ [index]
+ type = LUCENE
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ listenUrl = proxy-http://*:8080/
+ [sshd]
+ listenAddress = off
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [cache]
+ directory = cache
+ [container]
+ user = gerrit
+ javaHome = /usr/lib/jvm/java-11-openjdk-amd64
+ javaOptions = -Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore
+ javaOptions = -Xms300m
+ javaOptions = -Xmx300m
+
+ secure: |-
+ [remote "replica"]
+ username = git
+ password = secret
+
+ replication: |-
+ [gerrit]
+ autoReload = false
+ replicateOnStartup = true
+ defaultForceUpdate = true
+
+ [remote "replica"]
+ url = http://gerrit-replica-git-backend-service/git/${name}.git
+ replicationDelay = 0
+ timeout = 30
diff --git a/charts/k8s-gerrit/supplements/gerrit-slave.minikube.values.yaml b/charts/k8s-gerrit/supplements/gerrit-slave.minikube.values.yaml
new file mode 100644
index 0000000..e244c3c
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/gerrit-slave.minikube.values.yaml
@@ -0,0 +1,88 @@
+storageClasses:
+ default:
+ name: standard
+ shared:
+ name: shared-storage
+
+gitBackend:
+ resources:
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ limits:
+ cpu: 50m
+ memory: 100Mi
+
+ logging:
+ persistence:
+ enabled: false
+
+ service:
+ type: NodePort
+
+ ingress:
+ enabled: true
+ host: backend.gerrit
+
+gitGC:
+ schedule: "*/15 * * * *"
+
+ resources:
+ requests:
+ cpu: 50m
+ memory: 100Mi
+ limits:
+ cpu: 50m
+ memory: 100Mi
+
+ logging:
+ persistence:
+ enabled: false
+
+gerritReplica:
+ initializeTestSite:
+ enabled: true
+
+ resources:
+ requests:
+ cpu: 200m
+ memory: 400Mi
+ limits:
+ cpu: 500m
+ memory: 400Mi
+
+ persistence:
+ enabled: false
+
+ ingress:
+ host: replica.gerrit
+
+ config:
+ gerrit: |-
+ [gerrit]
+ basePath = git
+ serverId = gerrit-replica-1
+ canonicalWebUrl = http://replica.gerrit
+ [index]
+ type = LUCENE
+ [auth]
+ type = DEVELOPMENT_BECOME_ANY_ACCOUNT
+ [httpd]
+ listenUrl = proxy-http://*:8080/
+ [sshd]
+ listenAddress = off
+ [transfer]
+ timeout = 120 s
+ [user]
+ name = Gerrit Code Review
+ email = gerrit@example.com
+ anonymousCoward = Unnamed User
+ [cache]
+ directory = cache
+ [container]
+ user = gerrit
+ replica = true
+ javaHome = /usr/lib/jvm/java-11-openjdk-amd64
+ javaOptions = -Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore
+ javaOptions = -Xms300m
+ javaOptions = -Xmx300m
diff --git a/charts/k8s-gerrit/supplements/nfs.minikube.values.yaml b/charts/k8s-gerrit/supplements/nfs.minikube.values.yaml
new file mode 100644
index 0000000..dee0761
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/nfs.minikube.values.yaml
@@ -0,0 +1,20 @@
+replicaCount: 1
+
+storageClass:
+ create: true
+ defaultClass: false
+ # The name of the StorageClass has to be the same as the one defined in the
+ # gerrit chart for `storageClasses.shared.name`
+ name: shared-storage
+ parameters:
+ # Required!
+ mountOptions: vers=4.1
+ reclaimPolicy: Delete
+
+resources:
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ limits:
+ cpu: 100m
+ memory: 256Mi
diff --git a/charts/k8s-gerrit/supplements/test-cluster/deploy.sh b/charts/k8s-gerrit/supplements/test-cluster/deploy.sh
new file mode 100755
index 0000000..d157485
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/test-cluster/deploy.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPTPATH=$(dirname "$(readlink -f "$0")")
+
+if grep -rq '#TODO' --include='*.yaml' "$SCRIPTPATH"; then
+ echo "Incomplete configuration. Replace '#TODO' comments with valid configuration."
+ exit 1
+fi
+
+kubectl apply -f "$SCRIPTPATH/nfs/resources"
+helm upgrade nfs-subdir-external-provisioner \
+ nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
+ --values "$SCRIPTPATH/nfs/nfs-provisioner.values.yaml" \
+ --namespace nfs \
+ --install
+
+kubectl apply -f "$SCRIPTPATH/ldap"
+kubectl apply -f "$SCRIPTPATH/ingress"
+istioctl install -f "$SCRIPTPATH/../../istio/gerrit.profile.yaml"
diff --git a/charts/k8s-gerrit/supplements/test-cluster/ingress/nginx-ingress-controller.yaml b/charts/k8s-gerrit/supplements/test-cluster/ingress/nginx-ingress-controller.yaml
new file mode 100644
index 0000000..afda778
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/test-cluster/ingress/nginx-ingress-controller.yaml
@@ -0,0 +1,627 @@
+#https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.2.0/deploy/static/provider/cloud/deploy.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ name: ingress-nginx
+---
+apiVersion: v1
+automountServiceAccountToken: true
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx
+ namespace: ingress-nginx
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resourceNames:
+ - ingress-controller-leader
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - endpoints
+ - nodes
+ - pods
+ - secrets
+ - namespaces
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission
+rules:
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx
+ namespace: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+---
+apiVersion: v1
+data:
+ allow-snippet-annotations: "true"
+kind: ConfigMap
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ annotations:
+ # NOTE: This only works when using Gardener to manage the cluster
+ cert.gardener.cloud/commonName: #TODO: wildcard ingress URL, e.g. "*.example.com"
+ cert.gardener.cloud/purpose: managed
+ cert.gardener.cloud/secretname: tls-secret
+ dns.gardener.cloud/class: garden
+ dns.gardener.cloud/dnsnames: #TODO: wildcard ingress URL, e.g. "*.example.com"
+ dns.gardener.cloud/ttl: "600"
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+spec:
+ externalTrafficPolicy: Local
+ ports:
+ - appProtocol: http
+ name: http
+ port: 80
+ protocol: TCP
+ targetPort: http
+ - appProtocol: https
+ name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-controller-admission
+ namespace: ingress-nginx
+spec:
+ ports:
+ - appProtocol: https
+ name: https-webhook
+ port: 443
+ targetPort: webhook
+ selector:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+spec:
+ minReadySeconds: 0
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ spec:
+ containers:
+ - args:
+ - /nginx-ingress-controller
+ - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
+ - --election-id=ingress-controller-leader
+ - --controller-class=k8s.io/ingress-nginx
+ - --ingress-class=nginx
+ - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
+ - --validating-webhook=:8443
+ - --validating-webhook-certificate=/usr/local/certificates/cert
+ - --validating-webhook-key=/usr/local/certificates/key
+ - --default-ssl-certificate=ingress-nginx/tls-secret
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: LD_PRELOAD
+ value: /usr/local/lib/libmimalloc.so
+ image: k8s.gcr.io/ingress-nginx/controller:v1.2.0@sha256:d8196e3bc1e72547c5dec66d6556c0ff92a23f6d0919b206be170bc90d5f9185
+ imagePullPolicy: IfNotPresent
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /wait-shutdown
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ name: controller
+ ports:
+ - containerPort: 80
+ name: http
+ protocol: TCP
+ - containerPort: 443
+ name: https
+ protocol: TCP
+ - containerPort: 8443
+ name: webhook
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ resources:
+ requests:
+ cpu: 100m
+ memory: 90Mi
+ securityContext:
+ allowPrivilegeEscalation: true
+ capabilities:
+ add:
+ - NET_BIND_SERVICE
+ drop:
+ - ALL
+ runAsUser: 101
+ volumeMounts:
+ - mountPath: /usr/local/certificates/
+ name: webhook-cert
+ readOnly: true
+ dnsPolicy: ClusterFirst
+ nodeSelector:
+ kubernetes.io/os: linux
+ serviceAccountName: ingress-nginx
+ terminationGracePeriodSeconds: 300
+ volumes:
+ - name: webhook-cert
+ secret:
+ secretName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission-create
+ namespace: ingress-nginx
+spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission-create
+ spec:
+ containers:
+ - args:
+ - create
+ - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
+ - --namespace=$(POD_NAMESPACE)
+ - --secret-name=ingress-nginx-admission
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
+ imagePullPolicy: IfNotPresent
+ name: create
+ securityContext:
+ allowPrivilegeEscalation: false
+ nodeSelector:
+ kubernetes.io/os: linux
+ restartPolicy: OnFailure
+ securityContext:
+ fsGroup: 2000
+ runAsNonRoot: true
+ runAsUser: 2000
+ serviceAccountName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission-patch
+ namespace: ingress-nginx
+spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission-patch
+ spec:
+ containers:
+ - args:
+ - patch
+ - --webhook-name=ingress-nginx-admission
+ - --namespace=$(POD_NAMESPACE)
+ - --patch-mutating=false
+ - --secret-name=ingress-nginx-admission
+ - --patch-failure-policy=Fail
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
+ imagePullPolicy: IfNotPresent
+ name: patch
+ securityContext:
+ allowPrivilegeEscalation: false
+ nodeSelector:
+ kubernetes.io/os: linux
+ restartPolicy: OnFailure
+ securityContext:
+ fsGroup: 2000
+ runAsNonRoot: true
+ runAsUser: 2000
+ serviceAccountName: ingress-nginx-admission
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: nginx
+spec:
+ controller: k8s.io/ingress-nginx
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.2.0
+ name: ingress-nginx-admission
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: ingress-nginx-controller-admission
+ namespace: ingress-nginx
+ path: /networking/v1/ingresses
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validate.nginx.ingress.kubernetes.io
+ rules:
+ - apiGroups:
+ - networking.k8s.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - ingresses
+ sideEffects: None
diff --git a/charts/k8s-gerrit/supplements/test-cluster/ldap/openldap.yaml b/charts/k8s-gerrit/supplements/test-cluster/ldap/openldap.yaml
new file mode 100644
index 0000000..e6e42cc
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/test-cluster/ldap/openldap.yaml
@@ -0,0 +1,85 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: openldap
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: openldap-admin
+ namespace: openldap
+ labels:
+ app: gerrit
+stringData:
+ adminpassword: #TODO
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: openldap-users
+ namespace: openldap
+ labels:
+ app: gerrit
+stringData:
+ users: gerrit-admin,gerrit-user
+ passwords: #TODO
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: openldap
+ namespace: openldap
+ labels:
+ app.kubernetes.io/name: openldap
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: openldap
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: openldap
+ spec:
+ containers:
+ - name: openldap
+ image: docker.io/bitnami/openldap:latest
+ imagePullPolicy: "IfNotPresent"
+ env:
+ - name: LDAP_ADMIN_USERNAME
+ value: "admin"
+ - name: LDAP_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: adminpassword
+ name: openldap-admin
+ - name: LDAP_USERS
+ valueFrom:
+ secretKeyRef:
+ key: users
+ name: openldap-users
+ - name: LDAP_PASSWORDS
+ valueFrom:
+ secretKeyRef:
+ key: passwords
+ name: openldap-users
+ ports:
+ - name: tcp-ldap
+ containerPort: 1389
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: openldap
+ namespace: openldap
+ labels:
+ app.kubernetes.io/name: openldap
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-ldap
+ port: 1389
+ targetPort: tcp-ldap
+ selector:
+ app.kubernetes.io/name: openldap
+
diff --git a/charts/k8s-gerrit/supplements/test-cluster/nfs/nfs-provisioner.values.yaml b/charts/k8s-gerrit/supplements/test-cluster/nfs/nfs-provisioner.values.yaml
new file mode 100644
index 0000000..54ddd36
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/test-cluster/nfs/nfs-provisioner.values.yaml
@@ -0,0 +1,8 @@
+nfs:
+ server: #TODO
+ path: #TODO
+
+storageClass:
+ reclaimPolicy: Delete
+ archiveOnDelete: false
+ onDelete: delete
diff --git a/charts/k8s-gerrit/supplements/test-cluster/nfs/resources/nfs.namespace.yaml b/charts/k8s-gerrit/supplements/test-cluster/nfs/resources/nfs.namespace.yaml
new file mode 100644
index 0000000..6545cb0
--- /dev/null
+++ b/charts/k8s-gerrit/supplements/test-cluster/nfs/resources/nfs.namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: nfs
diff --git a/charts/k8s-gerrit/tests/conftest.py b/charts/k8s-gerrit/tests/conftest.py
new file mode 100644
index 0000000..eefb8f9
--- /dev/null
+++ b/charts/k8s-gerrit/tests/conftest.py
@@ -0,0 +1,337 @@
+# pylint: disable=W0613, W0212
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import getpass
+import os
+import sys
+
+from pathlib import Path
+
+import docker
+import pygit2 as git
+import pytest
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
+
+# pylint: disable=C0103
+pytest_plugins = ["fixtures.credentials", "fixtures.cluster", "fixtures.helm.gerrit"]
+
+# Base images that are not published and thus only tagged with "latest"
+BASE_IMGS = ["base", "gerrit-base"]
+
+
+# pylint: disable=W0622
+class PasswordPromptAction(argparse.Action):
+ def __init__(
+ self,
+ option_strings,
+ dest=None,
+ nargs=0,
+ default=None,
+ required=False,
+ type=None,
+ metavar=None,
+ help=None,
+ ):
+ super().__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ default=default,
+ required=required,
+ metavar=metavar,
+ type=type,
+ help=help,
+ )
+
+ def __call__(self, parser, args, values, option_string=None):
+ password = getpass.getpass()
+ setattr(args, self.dest, password)
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--registry",
+ action="store",
+ default="",
+ help="Container registry to push (if --push=true) and pull container images"
+ + "from for tests on Kubernetes clusters (default: '')",
+ )
+ parser.addoption(
+ "--registry-user",
+ action="store",
+ default="",
+ help="Username for container registry (default: '')",
+ )
+ parser.addoption(
+ "--registry-pwd",
+ action="store",
+ default="",
+ help="Password for container registry (default: '')",
+ )
+ parser.addoption(
+ "--org",
+ action="store",
+ default="k8sgerrit",
+ help="Docker organization (default: 'k8sgerrit')",
+ )
+ parser.addoption(
+ "--push",
+ action="store_true",
+ help="If set, the docker images will be pushed to the registry configured"
+ + "by --registry (default: false)",
+ )
+ parser.addoption(
+ "--tag",
+ action="store",
+ default=None,
+ help="Tag of cached container images to test. Missing images will be built."
+ + "(default: All container images will be built)",
+ )
+ parser.addoption(
+ "--build-cache",
+ action="store_true",
+ help="If set, the docker cache will be used when building container images.",
+ )
+ parser.addoption(
+ "--kubeconfig",
+ action="store",
+ default=None,
+ help="Kubeconfig to use for cluster connection. If none is given the currently"
+ + "configured context is used.",
+ )
+ parser.addoption(
+ "--rwm-storageclass",
+ action="store",
+ default="shared-storage",
+ help="Name of the storageclass used for ReadWriteMany access."
+ + "(default: shared-storage)",
+ )
+ parser.addoption(
+ "--ingress-url",
+ action="store",
+ default=None,
+ help="URL of the ingress domain used by the cluster.",
+ )
+ parser.addoption(
+ "--gerrit-user",
+ action="store",
+ default="admin",
+ help="Gerrit admin username to be used for smoke tests. (default: admin)",
+ )
+ parser.addoption(
+ "--gerrit-pwd",
+ action=PasswordPromptAction,
+ default="secret",
+ help="Gerrit admin password to be used for smoke tests. (default: secret)",
+ )
+ parser.addoption(
+ "--skip-slow", action="store_true", help="If set, skip slow tests."
+ )
+
+
+def pytest_collection_modifyitems(config, items):
+ if config.getoption("--skip-slow"):
+ skip_slow = pytest.mark.skip(reason="--skip-slow was set.")
+ for item in items:
+ if "slow" in item.keywords:
+ item.add_marker(skip_slow)
+
+
+def pytest_runtest_makereport(item, call):
+ if "incremental" in item.keywords:
+ if call.excinfo is not None:
+ parent = item.parent
+ parent._previousfailed = item
+
+
+def pytest_runtest_setup(item):
+ if "incremental" in item.keywords:
+ previousfailed = getattr(item.parent, "_previousfailed", None)
+ if previousfailed is not None:
+ pytest.xfail(f"previous test failed ({previousfailed.name})")
+
+
+@pytest.fixture(scope="session")
+def tag_of_cached_container(request):
+ return request.config.getoption("--tag")
+
+
+@pytest.fixture(scope="session")
+def docker_client():
+ return docker.from_env()
+
+
+@pytest.fixture(scope="session")
+def repository_root():
+ return Path(git.discover_repository(os.path.realpath(__file__))).parent.absolute()
+
+
+@pytest.fixture(scope="session")
+def container_images(repository_root):
+ image_paths = {}
+ for directory in os.listdir(os.path.join(repository_root, "container-images")):
+ image_paths[directory] = os.path.join(
+ repository_root, "container-images", directory
+ )
+ return image_paths
+
+
+@pytest.fixture(scope="session")
+def docker_registry(request):
+ registry = request.config.getoption("--registry")
+ if registry and not registry[-1] == "/":
+ registry += "/"
+ return registry
+
+
+@pytest.fixture(scope="session")
+def docker_org(request):
+ org = request.config.getoption("--org")
+ if org and not org[-1] == "/":
+ org += "/"
+ return org
+
+
+@pytest.fixture(scope="session")
+def docker_tag(tag_of_cached_container, repository_root):
+ if tag_of_cached_container:
+ return tag_of_cached_container
+ return git.Repository(repository_root).describe(dirty_suffix="-dirty")
+
+
+@pytest.fixture(scope="session")
+def docker_build(
+ request,
+ docker_client,
+ tag_of_cached_container,
+ docker_registry,
+ docker_org,
+ docker_tag,
+):
+ def docker_build(image, name):
+ if name in BASE_IMGS:
+ image_name = f"{name}:latest"
+ else:
+ image_name = f"{docker_registry}{docker_org}{name}:{docker_tag}"
+
+ if tag_of_cached_container:
+ try:
+ return docker_client.images.get(image_name)
+ except docker.errors.ImageNotFound:
+ print(f"Image {image_name} could not be loaded. Building it now.")
+
+ no_cache = not request.config.getoption("--build-cache")
+
+ build = docker_client.images.build(
+ path=image,
+ nocache=no_cache,
+ rm=True,
+ tag=image_name,
+ platform="linux/amd64",
+ )
+ return build[0]
+
+ return docker_build
+
+
+@pytest.fixture(scope="session")
+def docker_login(request, docker_client, docker_registry):
+ username = request.config.getoption("--registry-user")
+ if username:
+ docker_client.login(
+ username=username,
+ password=request.config.getoption("--registry-pwd"),
+ registry=docker_registry,
+ )
+
+
+@pytest.fixture(scope="session")
+def docker_push(
+ request, docker_client, docker_registry, docker_login, docker_org, docker_tag
+):
+ def docker_push(image):
+ docker_repository = f"{docker_registry}{docker_org}{image}"
+ docker_client.images.push(docker_repository, tag=docker_tag)
+
+ return docker_push
+
+
+@pytest.fixture(scope="session")
+def docker_network(request, docker_client):
+ network = docker_client.networks.create(
+ name="k8sgerrit-test-network", scope="local"
+ )
+
+ yield network
+
+ network.remove()
+
+
+@pytest.fixture(scope="session")
+def base_image(container_images, docker_build):
+ return docker_build(container_images["base"], "base")
+
+
+@pytest.fixture(scope="session")
+def gerrit_base_image(container_images, docker_build, base_image):
+ return docker_build(container_images["gerrit-base"], "gerrit-base")
+
+
+@pytest.fixture(scope="session")
+def gitgc_image(request, container_images, docker_build, docker_push, base_image):
+ gitgc_image = docker_build(container_images["git-gc"], "git-gc")
+ if request.config.getoption("--push"):
+ docker_push("git-gc")
+ return gitgc_image
+
+
+@pytest.fixture(scope="session")
+def apache_git_http_backend_image(
+ request, container_images, docker_build, docker_push, base_image
+):
+ apache_git_http_backend_image = docker_build(
+ container_images["apache-git-http-backend"], "apache-git-http-backend"
+ )
+ if request.config.getoption("--push"):
+ docker_push("apache-git-http-backend")
+ return apache_git_http_backend_image
+
+
+@pytest.fixture(scope="session")
+def gerrit_image(
+ request, container_images, docker_build, docker_push, base_image, gerrit_base_image
+):
+ gerrit_image = docker_build(container_images["gerrit"], "gerrit")
+ if request.config.getoption("--push"):
+ docker_push("gerrit")
+ return gerrit_image
+
+
+@pytest.fixture(scope="session")
+def gerrit_init_image(
+ request, container_images, docker_build, docker_push, base_image, gerrit_base_image
+):
+ gerrit_init_image = docker_build(container_images["gerrit-init"], "gerrit-init")
+ if request.config.getoption("--push"):
+ docker_push("gerrit-init")
+ return gerrit_init_image
+
+
+@pytest.fixture(scope="session")
+def required_plugins(request):
+ return ["healthcheck"]
diff --git a/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/conftest.py b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/conftest.py
new file mode 100644
index 0000000..8cd3443
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/conftest.py
@@ -0,0 +1,92 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+import string
+import time
+
+import pytest
+
+
+class GitBackendContainer:
+ def __init__(self, docker_client, image, port, credentials_dir):
+ self.docker_client = docker_client
+ self.image = image
+ self.port = port
+ self.apache_credentials_dir = credentials_dir
+
+ self.container = None
+
+ def start(self):
+ self.container = self.docker_client.containers.run(
+ image=self.image.id,
+ ports={"80": self.port},
+ volumes={
+ self.apache_credentials_dir: {
+ "bind": "/var/apache/credentials",
+ "mode": "ro",
+ }
+ },
+ detach=True,
+ auto_remove=True,
+ platform="linux/amd64",
+ )
+
+ def stop(self):
+ self.container.stop(timeout=1)
+
+
+@pytest.fixture(scope="module")
+def container_run_factory(
+ docker_client, apache_git_http_backend_image, htpasswd, credentials_dir
+):
+ def run_container(port):
+ return GitBackendContainer(
+ docker_client,
+ apache_git_http_backend_image,
+ port,
+ str(credentials_dir),
+ )
+
+ return run_container
+
+
+@pytest.fixture(scope="module")
+def container_run(container_run_factory, free_port):
+ test_setup = container_run_factory(free_port)
+ test_setup.start()
+ time.sleep(3)
+
+ yield test_setup
+
+ test_setup.stop()
+
+
+@pytest.fixture(scope="module")
+def base_url(container_run):
+ return f"http://localhost:{container_run.port}"
+
+
+@pytest.fixture(scope="function")
+def random_repo_name():
+ return "".join(
+ [random.choice(string.ascii_letters + string.digits) for n in range(8)]
+ )
+
+
+@pytest.fixture(scope="function")
+def repo_creation_url(base_url, random_repo_name):
+ return f"{base_url}/a/projects/{random_repo_name}"
diff --git a/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_build_apache_git_http_backend.py b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_build_apache_git_http_backend.py
new file mode 100644
index 0000000..984d6be
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_build_apache_git_http_backend.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.structure
+def test_build_apache_git_http_backend_image(
+ apache_git_http_backend_image, tag_of_cached_container
+):
+ if tag_of_cached_container:
+ pytest.skip("Cached image used for testing. Build will not be tested.")
+ assert apache_git_http_backend_image.id is not None
diff --git a/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_integration_apache_git_http_backend.py b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_integration_apache_git_http_backend.py
new file mode 100755
index 0000000..0d5ef65
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_integration_apache_git_http_backend.py
@@ -0,0 +1,96 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+
+import os.path
+
+import pygit2 as git
+import pytest
+import requests
+
+
+@pytest.fixture(scope="function")
+def repo_dir(tmp_path_factory, random_repo_name):
+ return tmp_path_factory.mktemp(random_repo_name)
+
+
+@pytest.fixture(scope="function")
+def mock_repo(repo_dir):
+ repo = git.init_repository(repo_dir, False)
+ file_name = os.path.join(repo_dir, "test.txt")
+ Path(file_name).touch()
+ repo.index.add("test.txt")
+ repo.index.write()
+ # pylint: disable=E1101
+ author = git.Signature("Gerrit Review", "gerrit@review.com")
+ committer = git.Signature("Gerrit Review", "gerrit@review.com")
+ message = "Initial commit"
+ tree = repo.index.write_tree()
+ repo.create_commit("HEAD", author, committer, message, tree, [])
+ return repo
+
+
+@pytest.mark.docker
+@pytest.mark.integration
+def test_apache_git_http_backend_repo_creation(
+ container_run, htpasswd, repo_creation_url
+):
+ request = requests.put(
+ repo_creation_url,
+ auth=requests.auth.HTTPBasicAuth(htpasswd["user"], htpasswd["password"]),
+ )
+ assert request.status_code == 201
+
+
+@pytest.mark.docker
+@pytest.mark.integration
+def test_apache_git_http_backend_repo_creation_fails_without_credentials(
+ container_run, repo_creation_url
+):
+ request = requests.put(repo_creation_url)
+ assert request.status_code == 401
+
+
+@pytest.mark.docker
+@pytest.mark.integration
+def test_apache_git_http_backend_repo_creation_fails_wrong_fs_permissions(
+ container_run, htpasswd, repo_creation_url
+):
+ container_run.container.exec_run("chown -R root:root /var/gerrit/git")
+ request = requests.put(
+ repo_creation_url,
+ auth=requests.auth.HTTPBasicAuth(htpasswd["user"], htpasswd["password"]),
+ )
+ container_run.container.exec_run("chown -R gerrit:users /var/gerrit/git")
+ assert request.status_code == 500
+
+
+@pytest.mark.docker
+@pytest.mark.integration
+def test_apache_git_http_backend_repo_creation_push_repo(
+ container_run, base_url, htpasswd, mock_repo, random_repo_name
+):
+ container_run.container.exec_run(
+ f"su -c 'git init --bare /var/gerrit/git/{random_repo_name}.git' gerrit"
+ )
+ url = f"{base_url}/{random_repo_name}.git"
+ url = url.replace("//", f"//{htpasswd['user']}:{htpasswd['password']}@")
+ origin = mock_repo.remotes.create("origin", url)
+ origin.push(["refs/heads/master:refs/heads/master"])
+
+ remote_refs = origin.ls_remotes()
+ assert str(remote_refs[0]["oid"]) == mock_repo.revparse_single("HEAD").hex
diff --git a/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_structure_apache_git_http_backend.py b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_structure_apache_git_http_backend.py
new file mode 100755
index 0000000..a138ef5
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/apache-git-http-backend/test_container_structure_apache_git_http_backend.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import utils
+
+
+# pylint: disable=E1101
+@pytest.mark.structure
+def test_apache_git_http_backend_inherits_from_base(apache_git_http_backend_image):
+ assert utils.check_if_ancestor_image_is_inherited(
+ apache_git_http_backend_image, "base:latest"
+ )
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_apache_git_http_backend_contains_apache2(container_run):
+ exit_code, _ = container_run.container.exec_run("which httpd")
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_apache_git_http_backend_http_site_configured(container_run):
+ exit_code, _ = container_run.container.exec_run(
+ "test -f /etc/apache2/conf.d/git-http-backend.conf"
+ )
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_apache_git_http_backend_contains_start_script(container_run):
+ exit_code, _ = container_run.container.exec_run("test -f /var/tools/start")
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_apache_git_http_backend_contains_repo_creation_cgi_script(container_run):
+ exit_code, _ = container_run.container.exec_run("test -f /var/cgi/project_admin.sh")
+ assert exit_code == 0
+
+
+@pytest.mark.structure
+def test_apache_git_http_backend_has_entrypoint(apache_git_http_backend_image):
+ entrypoint = apache_git_http_backend_image.attrs["ContainerConfig"]["Entrypoint"]
+ assert len(entrypoint) == 2
+ assert entrypoint[1] == "/var/tools/start"
diff --git a/charts/k8s-gerrit/tests/container-images/base/test_container_build_base.py b/charts/k8s-gerrit/tests/container-images/base/test_container_build_base.py
new file mode 100644
index 0000000..2a3afa5
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/base/test_container_build_base.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.structure
+def test_build_base(base_image, tag_of_cached_container):
+ if tag_of_cached_container:
+ pytest.skip("Cached image used for testing. Build will not be tested.")
+ assert base_image.id is not None
diff --git a/charts/k8s-gerrit/tests/container-images/base/test_container_structure_base.py b/charts/k8s-gerrit/tests/container-images/base/test_container_structure_base.py
new file mode 100755
index 0000000..528d2b4
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/base/test_container_structure_base.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.fixture(scope="module")
+def container_run(docker_client, container_endless_run_factory, base_image):
+ container_run = container_endless_run_factory(docker_client, base_image)
+ yield container_run
+ container_run.stop(timeout=1)
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_base_contains_git(container_run):
+ exit_code, _ = container_run.exec_run("which git")
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_base_has_non_root_user_gerrit(container_run):
+ exit_code, output = container_run.exec_run("id -u gerrit")
+ assert exit_code == 0
+ uid = int(output.strip().decode("utf-8"))
+ assert uid != 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_base_gerrit_no_root_permissions(container_run):
+ exit_code, _ = container_run.exec_run("su -c 'rm -rf /bin' gerrit")
+ assert exit_code > 0
diff --git a/charts/k8s-gerrit/tests/container-images/conftest.py b/charts/k8s-gerrit/tests/container-images/conftest.py
new file mode 100644
index 0000000..8a4b8f2
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/conftest.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import socket
+
+import pytest
+
+
+class GerritContainer:
+ def __init__(self, docker_client, docker_network, tmp_dir, image, configs, port):
+ self.docker_client = docker_client
+ self.docker_network = docker_network
+ self.tmp_dir = tmp_dir
+ self.image = image
+ self.configs = configs
+ self.port = port
+
+ self.container = None
+
+ def _create_config_files(self):
+ tmp_config_dir = os.path.join(self.tmp_dir, "configs")
+ if not os.path.isdir(tmp_config_dir):
+ os.mkdir(tmp_config_dir)
+ config_paths = {}
+ for filename, content in self.configs.items():
+ gerrit_config_file = os.path.join(tmp_config_dir, filename)
+ with open(gerrit_config_file, "w", encoding="utf-8") as config_file:
+ config_file.write(content)
+ config_paths[filename] = gerrit_config_file
+ return config_paths
+
+ def _define_volume_mounts(self):
+ volumes = {
+ v: {"bind": f"/var/gerrit/etc/{k}", "mode": "rw"}
+ for (k, v) in self._create_config_files().items()
+ }
+ volumes[os.path.join(self.tmp_dir, "lib")] = {
+ "bind": "/var/gerrit/lib",
+ "mode": "rw",
+ }
+ return volumes
+
+ def start(self):
+ self.container = self.docker_client.containers.run(
+ image=self.image.id,
+ user="gerrit",
+ volumes=self._define_volume_mounts(),
+ ports={8080: str(self.port)},
+ network=self.docker_network.name,
+ detach=True,
+ auto_remove=True,
+ platform="linux/amd64",
+ )
+
+ def stop(self):
+ self.container.stop(timeout=1)
+
+
+@pytest.fixture(scope="session")
+def gerrit_container_factory():
+ def get_gerrit_container(
+ docker_client, docker_network, tmp_dir, image, gerrit_config, port
+ ):
+ return GerritContainer(
+ docker_client, docker_network, tmp_dir, image, gerrit_config, port
+ )
+
+ return get_gerrit_container
+
+
+@pytest.fixture(scope="session")
+def container_endless_run_factory():
+ def get_container(docker_client, image):
+ return docker_client.containers.run(
+ image=image.id,
+ entrypoint="/bin/ash",
+ command=["-c", "tail -f /dev/null"],
+ user="gerrit",
+ detach=True,
+ auto_remove=True,
+ platform="linux/amd64",
+ )
+
+ return get_container
+
+
+@pytest.fixture(scope="session")
+def free_port():
+ skt = socket.socket()
+ skt.bind(("", 0))
+ port = skt.getsockname()[1]
+ skt.close()
+ return port
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit-base/test_container_build_gerrit_base.py b/charts/k8s-gerrit/tests/container-images/gerrit-base/test_container_build_gerrit_base.py
new file mode 100644
index 0000000..93954d8
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit-base/test_container_build_gerrit_base.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.structure
+def test_build_gerrit_base(gerrit_base_image, tag_of_cached_container):
+ if tag_of_cached_container:
+ pytest.skip("Cached image used for testing. Build will not be tested.")
+ assert gerrit_base_image.id is not None
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit-base/test_container_structure_gerrit_base.py b/charts/k8s-gerrit/tests/container-images/gerrit-base/test_container_structure_gerrit_base.py
new file mode 100755
index 0000000..05161b2
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit-base/test_container_structure_gerrit_base.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+import pytest
+
+import utils
+
+
+JAVA_VER = 11
+
+
+@pytest.fixture(scope="module")
+def container_run(docker_client, container_endless_run_factory, gerrit_base_image):
+ container_run = container_endless_run_factory(docker_client, gerrit_base_image)
+ yield container_run
+ container_run.stop(timeout=1)
+
+
+# pylint: disable=E1101
+@pytest.mark.structure
+def test_gerrit_base_inherits_from_base(gerrit_base_image):
+ assert utils.check_if_ancestor_image_is_inherited(gerrit_base_image, "base:latest")
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_base_contains_java(container_run):
+ _, output = container_run.exec_run("java -version")
+ output = output.strip().decode("utf-8")
+ assert re.search(re.compile(f'openjdk version "{JAVA_VER}.[0-9.]+"'), output)
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_base_java_path(container_run):
+ exit_code, output = container_run.exec_run(
+ '/bin/ash -c "readlink -f $(which java)"'
+ )
+ output = output.strip().decode("utf-8")
+ assert exit_code == 0
+ assert output == f"/usr/lib/jvm/java-{JAVA_VER}-openjdk/bin/java"
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_base_contains_gerrit_war(container_run):
+ exit_code, _ = container_run.exec_run("test -f /var/war/gerrit.war")
+ assert exit_code == 0
+
+ exit_code, _ = container_run.exec_run("test -f /var/gerrit/bin/gerrit.war")
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_base_war_contains_gerrit(container_run):
+ exit_code, output = container_run.exec_run("java -jar /var/war/gerrit.war version")
+ assert exit_code == 0
+ output = output.strip().decode("utf-8")
+ assert re.search(re.compile("gerrit version.*"), output)
+
+ exit_code, output = container_run.exec_run(
+ "java -jar /var/gerrit/bin/gerrit.war version"
+ )
+ assert exit_code == 0
+ output = output.strip().decode("utf-8")
+ assert re.search(re.compile("gerrit version.*"), output)
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_base_site_permissions(container_run):
+ exit_code, _ = container_run.exec_run("test -O /var/gerrit")
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_base_war_dir_permissions(container_run):
+ exit_code, _ = container_run.exec_run("test -O /var/war")
+ assert exit_code == 0
+
+
+@pytest.mark.structure
+def test_gerrit_base_has_entrypoint(gerrit_base_image):
+ entrypoint = gerrit_base_image.attrs["ContainerConfig"]["Entrypoint"]
+ assert "/var/tools/start" in entrypoint
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_build_gerrit_init.py b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_build_gerrit_init.py
new file mode 100644
index 0000000..dc16d74
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_build_gerrit_init.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.structure
+def test_build_gerrit_init(gerrit_init_image, tag_of_cached_container):
+ if tag_of_cached_container:
+ pytest.skip("Cached image used for testing. Build will not be tested.")
+ assert gerrit_init_image.id is not None
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_integration_gerrit_init.py b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_integration_gerrit_init.py
new file mode 100644
index 0000000..4dac6e0
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_integration_gerrit_init.py
@@ -0,0 +1,190 @@
+# pylint: disable=E1101
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import re
+
+from docker.errors import NotFound
+
+import pytest
+import yaml
+
+
+@pytest.fixture(scope="class")
+def container_run_default(request, docker_client, gerrit_init_image, tmp_path_factory):
+ tmp_site_dir = tmp_path_factory.mktemp("gerrit_site")
+ container_run = docker_client.containers.run(
+ image=gerrit_init_image.id,
+ user="gerrit",
+ volumes={tmp_site_dir: {"bind": "/var/gerrit", "mode": "rw"}},
+ detach=True,
+ auto_remove=True,
+ platform="linux/amd64",
+ )
+
+ def stop_container():
+ try:
+ container_run.stop(timeout=1)
+ except NotFound:
+ print("Container already stopped.")
+
+ request.addfinalizer(stop_container)
+
+ return container_run
+
+
+@pytest.fixture(scope="class")
+def init_config_dir(tmp_path_factory):
+ return tmp_path_factory.mktemp("init_config")
+
+
+@pytest.fixture(scope="class")
+def tmp_site_dir(tmp_path_factory):
+ return tmp_path_factory.mktemp("gerrit_site")
+
+
+@pytest.fixture(scope="class")
+def container_run_endless(
+ docker_client, gerrit_init_image, init_config_dir, tmp_site_dir
+):
+ container_run = docker_client.containers.run(
+ image=gerrit_init_image.id,
+ entrypoint="/bin/ash",
+ command=["-c", "tail -f /dev/null"],
+ user="gerrit",
+ volumes={
+ tmp_site_dir: {"bind": "/var/gerrit", "mode": "rw"},
+ init_config_dir: {"bind": "/var/config", "mode": "rw"},
+ },
+ detach=True,
+ auto_remove=True,
+ platform="linux/amd64",
+ )
+
+ yield container_run
+ container_run.stop(timeout=1)
+
+
+@pytest.mark.docker
+@pytest.mark.incremental
+@pytest.mark.integration
+class TestGerritInitEmptySite:
+ @pytest.mark.timeout(60)
+ def test_gerrit_init_gerrit_is_initialized(self, container_run_default):
+        def wait_for_init_success_message():
+            log = container_run_default.logs().decode("utf-8")
+            return re.search(r"Initialized /var/gerrit", log)
+
+        while not wait_for_init_success_message():
+            continue
+
+ @pytest.mark.timeout(60)
+ def test_gerrit_init_exits_after_init(self, container_run_default):
+ assert container_run_default.wait()["StatusCode"] == 0
+
+
+@pytest.fixture(
+ scope="function",
+ params=[
+ ["replication", "reviewnotes"],
+ ["replication", "reviewnotes", "hooks"],
+ ["download-commands"],
+ [],
+ ],
+)
+def plugins_to_install(request):
+ return request.param
+
+
+@pytest.mark.docker
+@pytest.mark.incremental
+@pytest.mark.integration
+class TestGerritInitPluginInstallation:
+ def _configure_packaged_plugins(self, file_path, plugins):
+ with open(file_path, "w", encoding="utf-8") as f:
+ yaml.dump(
+ {"plugins": [{"name": p} for p in plugins]}, f, default_flow_style=False
+ )
+
+ def test_gerrit_init_plugins_are_installed(
+ self,
+ container_run_endless,
+ init_config_dir,
+ plugins_to_install,
+ tmp_site_dir,
+ required_plugins,
+ ):
+ self._configure_packaged_plugins(
+ os.path.join(init_config_dir, "init.yaml"), plugins_to_install
+ )
+
+ exit_code, _ = container_run_endless.exec_run(
+ "python3 /var/tools/gerrit-initializer -s /var/gerrit -c /var/config/init.yaml init"
+ )
+ assert exit_code == 0
+
+ plugins_path = os.path.join(tmp_site_dir, "plugins")
+
+ for plugin in plugins_to_install:
+ assert os.path.exists(os.path.join(plugins_path, f"{plugin}.jar"))
+
+ installed_plugins = os.listdir(plugins_path)
+ expected_plugins = plugins_to_install + required_plugins
+ for plugin in installed_plugins:
+ assert os.path.splitext(plugin)[0] in expected_plugins
+
+ def test_required_plugins_are_installed(
+ self, container_run_endless, init_config_dir, tmp_site_dir, required_plugins
+ ):
+ self._configure_packaged_plugins(
+ os.path.join(init_config_dir, "init.yaml"), ["hooks"]
+ )
+
+ exit_code, _ = container_run_endless.exec_run(
+ "python3 /var/tools/gerrit-initializer -s /var/gerrit -c /var/config/init.yaml init"
+ )
+ assert exit_code == 0
+
+ for plugin in required_plugins:
+ assert os.path.exists(
+ os.path.join(tmp_site_dir, "plugins", f"{plugin}.jar")
+ )
+
+ def test_libraries_are_symlinked(
+ self, container_run_endless, init_config_dir, tmp_site_dir
+ ):
+ with open(
+ os.path.join(init_config_dir, "init.yaml"), "w", encoding="utf-8"
+ ) as f:
+ yaml.dump(
+ {"plugins": [{"name": "hooks", "installAsLibrary": True}]},
+ f,
+ default_flow_style=False,
+ )
+
+ exit_code, _ = container_run_endless.exec_run(
+ "python3 /var/tools/gerrit-initializer -s /var/gerrit -c /var/config/init.yaml init"
+ )
+ assert exit_code == 0
+
+ assert os.path.exists(os.path.join(tmp_site_dir, "plugins", "hooks.jar"))
+ assert os.path.islink(os.path.join(tmp_site_dir, "lib", "hooks.jar"))
+
+ exit_code, output = container_run_endless.exec_run(
+ "readlink -f /var/gerrit/lib/hooks.jar"
+ )
+ assert exit_code == 0
+ assert output.decode("utf-8").strip() == "/var/gerrit/plugins/hooks.jar"
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_integration_gerrit_init_reindexing.py b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_integration_gerrit_init_reindexing.py
new file mode 100644
index 0000000..c8a5b49
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_integration_gerrit_init_reindexing.py
@@ -0,0 +1,160 @@
+# pylint: disable=E1101
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pytest
+
+
+@pytest.fixture(scope="function")
+def temp_site(tmp_path_factory):
+ return tmp_path_factory.mktemp("gerrit-index-test")
+
+
+@pytest.fixture(scope="function")
+def container_run_endless(request, docker_client, gerrit_init_image, temp_site):
+ container_run = docker_client.containers.run(
+ image=gerrit_init_image.id,
+ entrypoint="/bin/ash",
+ command=["-c", "tail -f /dev/null"],
+ volumes={str(temp_site): {"bind": "/var/gerrit", "mode": "rw"}},
+ user="gerrit",
+ detach=True,
+ auto_remove=True,
+ platform="linux/amd64",
+ )
+
+ def stop_container():
+ container_run.stop(timeout=1)
+
+ request.addfinalizer(stop_container)
+
+ return container_run
+
+
+@pytest.mark.incremental
+class TestGerritReindex:
+ def _get_indices(self, container):
+ _, indices = container.exec_run(
+ "git config -f /var/gerrit/index/gerrit_index.config "
+ + "--name-only "
+ + "--get-regexp index"
+ )
+ indices = indices.decode().strip().splitlines()
+ return [index.split(".")[1] for index in indices]
+
+ def test_gerrit_init_skips_reindexing_on_fresh_site(
+ self, temp_site, container_run_endless
+ ):
+ assert not os.path.exists(
+ os.path.join(temp_site, "index", "gerrit_index.config")
+ )
+ exit_code, _ = container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml init"
+ )
+ )
+ assert exit_code == 0
+ expected_files = ["gerrit_index.config"] + self._get_indices(
+ container_run_endless
+ )
+ for expected_file in expected_files:
+ assert os.path.exists(os.path.join(temp_site, "index", expected_file))
+
+ timestamp_index_dir = os.path.getctime(os.path.join(temp_site, "index"))
+
+ exit_code, _ = container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml reindex"
+ )
+ )
+ assert exit_code == 0
+ assert timestamp_index_dir == os.path.getctime(os.path.join(temp_site, "index"))
+
+ def test_gerrit_init_fixes_missing_index_config(
+ self, container_run_endless, temp_site
+ ):
+ container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml init"
+ )
+ )
+ os.remove(os.path.join(temp_site, "index", "gerrit_index.config"))
+
+ exit_code, _ = container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml reindex"
+ )
+ )
+ assert exit_code == 0
+
+ exit_code, _ = container_run_endless.exec_run("/var/gerrit/bin/gerrit.sh start")
+ assert exit_code == 0
+
+ def test_gerrit_init_fixes_not_ready_indices(self, container_run_endless):
+ container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml init"
+ )
+ )
+
+ indices = self._get_indices(container_run_endless)
+ assert indices
+ container_run_endless.exec_run(
+            f"git config -f /var/gerrit/index/gerrit_index.config index.{indices[0]}.ready false"
+ )
+
+ exit_code, _ = container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml reindex"
+ )
+ )
+ assert exit_code == 0
+
+ exit_code, _ = container_run_endless.exec_run("/var/gerrit/bin/gerrit.sh start")
+ assert exit_code == 0
+
+ def test_gerrit_init_fixes_outdated_indices(self, container_run_endless, temp_site):
+ container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml init"
+ )
+ )
+
+ index = self._get_indices(container_run_endless)[0]
+ (name, version) = index.split("_")
+ os.rename(
+ os.path.join(temp_site, "index", index),
+ os.path.join(temp_site, "index", f"{name}_{int(version) - 1:04d}"),
+ )
+
+ exit_code, _ = container_run_endless.exec_run(
+ (
+ "python3 /var/tools/gerrit-initializer "
+ "-s /var/gerrit -c /var/config/gerrit-init.yaml reindex"
+ )
+ )
+ assert exit_code == 0
+
+ exit_code, _ = container_run_endless.exec_run("/var/gerrit/bin/gerrit.sh start")
+ assert exit_code == 0
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_structure_gerrit_init.py b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_structure_gerrit_init.py
new file mode 100755
index 0000000..5861a5e
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit-init/test_container_structure_gerrit_init.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+import utils
+
+
+@pytest.fixture(scope="module")
+def container_run(docker_client, container_endless_run_factory, gerrit_init_image):
+ container_run = container_endless_run_factory(docker_client, gerrit_init_image)
+ yield container_run
+ container_run.stop(timeout=1)
+
+
+@pytest.fixture(
+ scope="function",
+ params=[
+ "/var/tools/gerrit-initializer/__main__.py",
+ "/var/tools/gerrit-initializer/main.py",
+ ],
+)
+def expected_script(request):
+ return request.param
+
+
+@pytest.fixture(scope="function", params=["python3"])
+def expected_tool(request):
+ return request.param
+
+
+@pytest.fixture(scope="function", params=["pyyaml", "requests"])
+def expected_pip_package(request):
+ return request.param
+
+
+# pylint: disable=E1101
+@pytest.mark.structure
+def test_gerrit_init_inherits_from_gerrit_base(gerrit_init_image):
+ assert utils.check_if_ancestor_image_is_inherited(
+ gerrit_init_image, "gerrit-base:latest"
+ )
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_init_contains_expected_scripts(container_run, expected_script):
+ exit_code, _ = container_run.exec_run(f"test -f {expected_script}")
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_init_contains_expected_tools(container_run, expected_tool):
+ exit_code, _ = container_run.exec_run(f"which {expected_tool}")
+ assert exit_code == 0
+
+
+@pytest.mark.structure
+def test_gerrit_init_has_entrypoint(gerrit_init_image):
+ entrypoint = gerrit_init_image.attrs["ContainerConfig"]["Entrypoint"]
+ assert len(entrypoint) >= 1
+ assert entrypoint == ["python3", "/var/tools/gerrit-initializer"]
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit/test_container_build_gerrit.py b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_build_gerrit.py
new file mode 100644
index 0000000..a2c3dd5
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_build_gerrit.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.structure
+def test_build_gerrit(gerrit_image, tag_of_cached_container):
+ if tag_of_cached_container:
+ pytest.skip("Cached image used for testing. Build will not be tested.")
+ assert gerrit_image.id is not None
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit/test_container_integration_gerrit.py b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_integration_gerrit.py
new file mode 100644
index 0000000..9376a4a
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_integration_gerrit.py
@@ -0,0 +1,108 @@
+# pylint: disable=W0613, E1101
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import time
+
+import pytest
+import requests
+
+
+@pytest.fixture(scope="module")
+def tmp_dir(tmp_path_factory):
+ return tmp_path_factory.mktemp("gerrit-test")
+
+
+@pytest.fixture(scope="class")
+def container_run(
+ docker_client,
+ docker_network,
+ tmp_dir,
+ gerrit_image,
+ gerrit_container_factory,
+ free_port,
+):
+ configs = {
+ "gerrit.config": """
+ [gerrit]
+ basePath = git
+
+ [httpd]
+ listenUrl = http://*:8080
+
+ [test]
+ success = True
+ """,
+ "secure.config": """
+ [test]
+ success = True
+ """,
+ "replication.config": """
+ [test]
+ success = True
+ """,
+ }
+ test_setup = gerrit_container_factory(
+ docker_client, docker_network, tmp_dir, gerrit_image, configs, free_port
+ )
+ test_setup.start()
+
+ yield test_setup
+
+ test_setup.stop()
+
+
+@pytest.fixture(params=["gerrit.config", "secure.config", "replication.config"])
+def config_file_to_test(request):
+ return request.param
+
+
+@pytest.mark.docker
+@pytest.mark.incremental
+@pytest.mark.integration
+@pytest.mark.slow
+class TestGerritStartScript:
+    @pytest.mark.timeout(60)
+    def test_gerrit_gerrit_starts_up(self, container_run):
+        def wait_for_gerrit_start():
+            log = container_run.container.logs().decode("utf-8")
+            return re.search(r"Gerrit Code Review .+ ready", log)
+
+        # BUG FIX: the predicate must be *called*; the bare function object is
+        # always truthy, so `while not wait_for_gerrit_start:` exited at once
+        # without waiting (cf. the correct replica variant of this test).
+        while not wait_for_gerrit_start():
+            continue
+
+ def test_gerrit_custom_gerrit_config_available(
+ self, container_run, config_file_to_test
+ ):
+ exit_code, output = container_run.container.exec_run(
+ f"git config --file=/var/gerrit/etc/{config_file_to_test} --get test.success"
+ )
+ output = output.decode("utf-8").strip()
+ assert exit_code == 0
+ assert output == "True"
+
+ @pytest.mark.timeout(60)
+ def test_gerrit_httpd_is_responding(self, container_run):
+ status = None
+ while not status == 200:
+ try:
+ response = requests.get(f"http://localhost:{container_run.port}")
+ status = response.status_code
+ except requests.exceptions.ConnectionError:
+ time.sleep(1)
+
+ assert response.status_code == 200
+ assert re.search(r'content="Gerrit Code Review"', response.text)
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit/test_container_integration_gerrit_replica.py b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_integration_gerrit_replica.py
new file mode 100644
index 0000000..3673eab
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_integration_gerrit_replica.py
@@ -0,0 +1,122 @@
+# pylint: disable=W0613, E1101
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import os.path
+import re
+
+import pygit2 as git
+import pytest
+import requests
+
+CONFIG_FILES = ["gerrit.config", "secure.config"]
+
+
+@pytest.fixture(scope="module")
+def tmp_dir(tmp_path_factory):
+ return tmp_path_factory.mktemp("gerrit-replica-test")
+
+
+@pytest.fixture(scope="class")
+def container_run(
+ request,
+ docker_client,
+ docker_network,
+ tmp_dir,
+ gerrit_image,
+ gerrit_container_factory,
+ free_port,
+):
+ configs = {
+ "gerrit.config": """
+ [gerrit]
+ basePath = git
+
+ [httpd]
+ listenUrl = http://*:8080
+
+ [container]
+ replica = true
+
+ [test]
+ success = True
+ """,
+ "secure.config": """
+ [test]
+ success = True
+ """,
+ }
+
+ test_setup = gerrit_container_factory(
+ docker_client, docker_network, tmp_dir, gerrit_image, configs, free_port
+ )
+ test_setup.start()
+
+ request.addfinalizer(test_setup.stop)
+
+ return test_setup
+
+
+@pytest.mark.docker
+@pytest.mark.incremental
+@pytest.mark.integration
+@pytest.mark.slow
+class TestGerritReplica:
+ @pytest.fixture(params=CONFIG_FILES)
+ def config_file_to_test(self, request):
+ return request.param
+
+ @pytest.fixture(params=["All-Users.git", "All-Projects.git"])
+ def expected_repository(self, request):
+ return request.param
+
+ @pytest.mark.timeout(60)
+ def test_gerrit_replica_gerrit_starts_up(self, container_run):
+ def wait_for_gerrit_start():
+ log = container_run.container.logs().decode("utf-8")
+ return re.search(r"Gerrit Code Review .+ ready", log)
+
+ while not wait_for_gerrit_start():
+ continue
+
+ def test_gerrit_replica_custom_gerrit_config_available(
+ self, container_run, config_file_to_test
+ ):
+ exit_code, output = container_run.container.exec_run(
+ f"git config --file=/var/gerrit/etc/{config_file_to_test} --get test.success"
+ )
+ output = output.decode("utf-8").strip()
+ assert exit_code == 0
+ assert output == "True"
+
+ def test_gerrit_replica_repository_exists(self, container_run, expected_repository):
+ exit_code, _ = container_run.container.exec_run(
+ f"test -d /var/gerrit/git/{expected_repository}"
+ )
+ assert exit_code == 0
+
+ def test_gerrit_replica_clone_repo_works(self, container_run, tmp_path_factory):
+ container_run.container.exec_run("git init --bare /var/gerrit/git/test.git")
+ clone_dest = tmp_path_factory.mktemp("gerrit_replica_clone_test")
+ repo = git.clone_repository(
+ f"http://localhost:{container_run.port}/test.git", clone_dest
+ )
+ assert repo.path == os.path.join(clone_dest, ".git/")
+
+ def test_gerrit_replica_webui_not_accessible(self, container_run):
+ response = requests.get(f"http://localhost:{container_run.port}")
+ assert response.status_code == 404
+ assert response.text == "Not Found"
diff --git a/charts/k8s-gerrit/tests/container-images/gerrit/test_container_structure_gerrit.py b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_structure_gerrit.py
new file mode 100755
index 0000000..7ece25e
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/gerrit/test_container_structure_gerrit.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+import utils
+
+
+@pytest.fixture(scope="module")
+def container_run(docker_client, container_endless_run_factory, gerrit_image):
+ container_run = container_endless_run_factory(docker_client, gerrit_image)
+ yield container_run
+ container_run.stop(timeout=1)
+
+
+# pylint: disable=E1101
+@pytest.mark.structure
+def test_gerrit_inherits_from_gerrit_base(gerrit_image):
+ assert utils.check_if_ancestor_image_is_inherited(
+ gerrit_image, "gerrit-base:latest"
+ )
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gerrit_contains_start_script(container_run):
+ exit_code, _ = container_run.exec_run("test -f /var/tools/start")
+ assert exit_code == 0
diff --git a/charts/k8s-gerrit/tests/container-images/git-gc/test_container_build_gitgc.py b/charts/k8s-gerrit/tests/container-images/git-gc/test_container_build_gitgc.py
new file mode 100644
index 0000000..a640d20
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/git-gc/test_container_build_gitgc.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.structure
+def test_build_gitgc(gitgc_image, tag_of_cached_container):
+ if tag_of_cached_container:
+ pytest.skip("Cached image used for testing. Build will not be tested.")
+ assert gitgc_image.id is not None
diff --git a/charts/k8s-gerrit/tests/container-images/git-gc/test_container_structure_gitgc.py b/charts/k8s-gerrit/tests/container-images/git-gc/test_container_structure_gitgc.py
new file mode 100644
index 0000000..9f03644
--- /dev/null
+++ b/charts/k8s-gerrit/tests/container-images/git-gc/test_container_structure_gitgc.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+import utils
+
+
+@pytest.fixture(scope="module")
+def container_run(docker_client, container_endless_run_factory, gitgc_image):
+ container_run = container_endless_run_factory(docker_client, gitgc_image)
+ yield container_run
+ container_run.stop(timeout=1)
+
+
+# pylint: disable=E1101
+@pytest.mark.structure
+def test_gitgc_inherits_from_base(gitgc_image):
+ assert utils.check_if_ancestor_image_is_inherited(gitgc_image, "base:latest")
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gitgc_log_dir_writable_by_gerrit(container_run):
+ exit_code, _ = container_run.exec_run("touch /var/log/git/test.log")
+ assert exit_code == 0
+
+
+@pytest.mark.docker
+@pytest.mark.structure
+def test_gitgc_contains_gc_script(container_run):
+ exit_code, _ = container_run.exec_run("test -f /var/tools/gc.sh")
+ assert exit_code == 0
+
+
+@pytest.mark.structure
+def test_gitgc_has_entrypoint(gitgc_image):
+ entrypoint = gitgc_image.attrs["ContainerConfig"]["Entrypoint"]
+ assert len(entrypoint) == 1
+ assert entrypoint[0] == "/var/tools/gc.sh"
diff --git a/charts/k8s-gerrit/tests/fixtures/__init__.py b/charts/k8s-gerrit/tests/fixtures/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charts/k8s-gerrit/tests/fixtures/__init__.py
diff --git a/charts/k8s-gerrit/tests/fixtures/cluster.py b/charts/k8s-gerrit/tests/fixtures/cluster.py
new file mode 100644
index 0000000..eb94968
--- /dev/null
+++ b/charts/k8s-gerrit/tests/fixtures/cluster.py
@@ -0,0 +1,144 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import warnings
+
+from kubernetes import client, config
+
+import pytest
+
+from .helm.client import HelmClient
+
+
+class Cluster:
+ def __init__(self, kube_config):
+ self.kube_config = kube_config
+
+ self.image_pull_secrets = []
+ self.namespaces = []
+
+ context = self._load_kube_config()
+ self.helm = HelmClient(self.kube_config, context)
+
+ def _load_kube_config(self):
+ config.load_kube_config(config_file=self.kube_config)
+ _, context = config.list_kube_config_contexts(config_file=self.kube_config)
+ return context["name"]
+
+ def _apply_image_pull_secrets(self, namespace):
+ for ips in self.image_pull_secrets:
+ try:
+ client.CoreV1Api().create_namespaced_secret(namespace, ips)
+ except client.rest.ApiException as exc:
+ if exc.status == 409 and exc.reason == "Conflict":
+ warnings.warn(
+ "Kubernetes Cluster not empty. Image pull secret already exists."
+ )
+ else:
+ raise exc
+
+ def add_container_registry(self, secret_name, url, user, pwd):
+ data = {
+ "auths": {
+ url: {
+ "auth": base64.b64encode(str.encode(f"{user}:{pwd}")).decode(
+ "utf-8"
+ )
+ }
+ }
+ }
+ metadata = client.V1ObjectMeta(name=secret_name)
+ self.image_pull_secrets.append(
+ client.V1Secret(
+ api_version="v1",
+ kind="Secret",
+ metadata=metadata,
+ type="kubernetes.io/dockerconfigjson",
+ data={
+ ".dockerconfigjson": base64.b64encode(
+ json.dumps(data).encode()
+ ).decode("utf-8")
+ },
+ )
+ )
+
+ def create_namespace(self, name):
+ namespace_metadata = client.V1ObjectMeta(name=name)
+ namespace_body = client.V1Namespace(
+ kind="Namespace", api_version="v1", metadata=namespace_metadata
+ )
+ client.CoreV1Api().create_namespace(body=namespace_body)
+ self.namespaces.append(name)
+ self._apply_image_pull_secrets(name)
+
+ def delete_namespace(self, name):
+ if name not in self.namespaces:
+ return
+
+ client.CoreV1Api().delete_namespace(name, body=client.V1DeleteOptions())
+ self.namespaces.remove(name)
+
+ def cleanup(self):
+ while self.namespaces:
+ self.helm.delete_all(
+ namespace=self.namespaces[0],
+ )
+ self.delete_namespace(self.namespaces[0])
+
+
+@pytest.fixture(scope="session")
+def test_cluster(request):
+ kube_config = request.config.getoption("--kubeconfig")
+
+ test_cluster = Cluster(kube_config)
+ test_cluster.add_container_registry(
+ "image-pull-secret",
+ request.config.getoption("--registry"),
+ request.config.getoption("--registry-user"),
+ request.config.getoption("--registry-pwd"),
+ )
+
+ yield test_cluster
+
+ test_cluster.cleanup()
+
+
+@pytest.fixture(scope="session")
+def ldap_credentials(test_cluster):
+ ldap_secret = client.CoreV1Api().read_namespaced_secret(
+ "openldap-users", namespace="openldap"
+ )
+ users = base64.b64decode(ldap_secret.data["users"]).decode("utf-8").split(",")
+ passwords = (
+ base64.b64decode(ldap_secret.data["passwords"]).decode("utf-8").split(",")
+ )
+ credentials = {}
+ for i, user in enumerate(users):
+ credentials[user] = passwords[i]
+
+ yield credentials
+
+
+@pytest.fixture(scope="session")
+def ldap_admin_credentials(test_cluster):
+ ldap_secret = client.CoreV1Api().read_namespaced_secret(
+ "openldap-admin", namespace="openldap"
+ )
+ password = base64.b64decode(ldap_secret.data["adminpassword"]).decode("utf-8")
+
+ yield ("admin", password)
diff --git a/charts/k8s-gerrit/tests/fixtures/credentials.py b/charts/k8s-gerrit/tests/fixtures/credentials.py
new file mode 100644
index 0000000..de39dc1
--- /dev/null
+++ b/charts/k8s-gerrit/tests/fixtures/credentials.py
@@ -0,0 +1,39 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pytest
+
+from passlib.apache import HtpasswdFile
+
+import utils
+
+
+@pytest.fixture(scope="session")
+def credentials_dir(tmp_path_factory):
+ return tmp_path_factory.mktemp("creds")
+
+
+@pytest.fixture(scope="session")
+def htpasswd(credentials_dir):
+ basic_auth_creds = {"user": "admin", "password": utils.create_random_string(16)}
+ htpasswd_file = HtpasswdFile(os.path.join(credentials_dir, ".htpasswd"), new=True)
+ htpasswd_file.set_password(basic_auth_creds["user"], basic_auth_creds["password"])
+ htpasswd_file.save()
+ basic_auth_creds["htpasswd_string"] = htpasswd_file.to_string()
+ basic_auth_creds["htpasswd_file"] = credentials_dir
+ yield basic_auth_creds
diff --git a/charts/k8s-gerrit/tests/fixtures/helm/__init__.py b/charts/k8s-gerrit/tests/fixtures/helm/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charts/k8s-gerrit/tests/fixtures/helm/__init__.py
diff --git a/charts/k8s-gerrit/tests/fixtures/helm/abstract_deployment.py b/charts/k8s-gerrit/tests/fixtures/helm/abstract_deployment.py
new file mode 100644
index 0000000..517cfe2
--- /dev/null
+++ b/charts/k8s-gerrit/tests/fixtures/helm/abstract_deployment.py
@@ -0,0 +1,99 @@
+import abc
+import random
+import re
+import string
+
+from time import time
+
+from kubernetes import client
+
+
+class AbstractDeployment(abc.ABC):
+ def __init__(self, tmp_dir):
+ self.tmp_dir = tmp_dir
+ self.namespace = "".join(
+ [random.choice(string.ascii_letters) for n in range(8)]
+ ).lower()
+ self.values_file = self._set_values_file()
+ self.chart_opts = {}
+
+ @abc.abstractmethod
+ def install(self, wait=True):
+ pass
+
+ @abc.abstractmethod
+ def update(self):
+ pass
+
+ @abc.abstractmethod
+ def uninstall(self):
+ pass
+
+ @abc.abstractmethod
+ def _set_values_file(self):
+ pass
+
+    def set_helm_value(self, combined_key, value):
+        # Split on unescaped dots only; `\.` denotes a literal dot in a key.
+        nested_keys = re.split(r"(?<!\\)\.", combined_key)
+        dct_pointer = self.chart_opts
+        for key in nested_keys[:-1]:
+            # pylint: disable=W1401
+            # BUG FIX: str.replace returns a new string — the result was
+            # previously discarded, leaving the backslash in the stored key.
+            key = key.replace("\.", ".")
+            dct_pointer = dct_pointer.setdefault(key, {})
+        # pylint: disable=W1401
+        dct_pointer[nested_keys[-1].replace("\.", ".")] = value
+
+ def _wait_for_pod_readiness(self, pod_labels, timeout=180):
+ """Helper function that can be used to wait for all pods with a given set of
+ labels to be ready.
+
+ Arguments:
+ pod_labels {str} -- Label selector string to be used to select pods.
+ (https://kubernetes.io/docs/concepts/overview/working-with-objects/\
+ labels/#label-selectors)
+
+ Keyword Arguments:
+ timeout {int} -- Time in seconds to wait for the pod status to become ready.
+ (default: {180})
+
+ Returns:
+ boolean -- Whether pods were ready in time.
+ """
+
+        def check_pod_readiness():
+            core_v1 = client.CoreV1Api()
+            pod_list = core_v1.list_pod_for_all_namespaces(
+                watch=False, label_selector=pod_labels
+            )
+            for pod in pod_list.items:
+                for condition in pod.status.conditions:
+                    # BUG FIX: a pod is unready when its "Ready" condition is
+                    # not "True"; the previous `!=`/`and` check never matched
+                    # that case and flagged unrelated conditions instead.
+                    if condition.type == "Ready" and condition.status != "True":
+                        return False
+            return True
+
+ return self._exec_fn_with_timeout(check_pod_readiness, limit=timeout)
+
+ def _exec_fn_with_timeout(self, func, limit=60):
+ """Helper function that executes a given function until it returns True or a
+ given time limit is reached.
+
+ Arguments:
+ func {function} -- Function to execute. The function can return some output
+ (or None) and as a second return value a boolean indicating,
+ whether the event the function was waiting for has happened.
+
+ Keyword Arguments:
+ limit {int} -- Maximum time in seconds to wait for a positive response of
+ the function (default: {60})
+
+ Returns:
+ boolean -- False, if the timeout was reached
+ any -- Last output of fn
+ """
+
+ timeout = time() + limit
+ while time() < timeout:
+ is_finished = func()
+ if is_finished:
+ return True
+ return False
diff --git a/charts/k8s-gerrit/tests/fixtures/helm/client.py b/charts/k8s-gerrit/tests/fixtures/helm/client.py
new file mode 100644
index 0000000..eb3285f
--- /dev/null
+++ b/charts/k8s-gerrit/tests/fixtures/helm/client.py
@@ -0,0 +1,202 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import subprocess
+
+
+class HelmClient:
+ def __init__(self, kubeconfig, kubecontext):
+ """Wrapper for Helm CLI.
+
+ Arguments:
+ kubeconfig {str} -- Path to kubeconfig-file describing the cluster to
+ connect to.
+ kubecontext {str} -- Name of the context to use.
+ """
+
+ self.kubeconfig = kubeconfig
+ self.kubecontext = kubecontext
+
+ def _exec_command(self, cmd, fail_on_err=True):
+ base_cmd = [
+ "helm",
+ "--kubeconfig",
+ self.kubeconfig,
+ "--kube-context",
+ self.kubecontext,
+ ]
+ return subprocess.run(
+ base_cmd + cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ check=fail_on_err,
+ text=True,
+ )
+
+ def install(
+ self,
+ chart,
+ name,
+ values_file=None,
+ set_values=None,
+ namespace=None,
+ fail_on_err=True,
+ wait=True,
+ ):
+ """Installs a chart on the cluster
+
+ Arguments:
+ chart {str} -- Release name or path of a helm chart
+ name {str} -- Name with which the chart will be installed on the cluster
+
+ Keyword Arguments:
+ values_file {str} -- Path to a custom values.yaml file (default: {None})
+ set_values {dict} -- Dictionary containing key-value-pairs that are used
+ to overwrite values in the values.yaml-file.
+ (default: {None})
+ namespace {str} -- Namespace to install the release into (default: {default})
+ fail_on_err {bool} -- Whether to fail with an exception if the installation
+ fails (default: {True})
+ wait {bool} -- Whether to wait for all pods to be ready (default: {True})
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of the
+ executed command.
+ """
+
+ helm_cmd = ["install", name, chart, "--dependency-update"]
+ if values_file:
+ helm_cmd.extend(("-f", values_file))
+ if set_values:
+ opt_list = [f"{k}={v}" for k, v in set_values.items()]
+ helm_cmd.extend(("--set", ",".join(opt_list)))
+ if namespace:
+ helm_cmd.extend(("--namespace", namespace))
+ if wait:
+ helm_cmd.append("--wait")
+ return self._exec_command(helm_cmd, fail_on_err)
+
+ def list(self, namespace=None):
+ """Lists helm charts installed on the cluster.
+
+ Keyword Arguments:
+ namespace {str} -- Kubernetes namespace (default: {None})
+
+ Returns:
+ list -- List of helm chart realeases installed on the cluster.
+ """
+
+ helm_cmd = ["list", "--all", "--output", "json"]
+ if namespace:
+ helm_cmd.extend(("--namespace", namespace))
+ output = self._exec_command(helm_cmd).stdout
+ return json.loads(output)
+
+ def upgrade(
+ self,
+ chart,
+ name,
+ namespace,
+ values_file=None,
+ set_values=None,
+ reuse_values=True,
+ fail_on_err=True,
+ ):
+ """Updates a chart on the cluster
+
+ Arguments:
+ chart {str} -- Release name or path of a helm chart
+ name {str} -- Name with which the chart will be installed on the cluster
+ namespace {str} -- Kubernetes namespace
+
+ Keyword Arguments:
+ values_file {str} -- Path to a custom values.yaml file (default: {None})
+ set_values {dict} -- Dictionary containing key-value-pairs that are used
+ to overwrite values in the values.yaml-file.
+ (default: {None})
+ reuse_values {bool} -- Whether to reuse existing not overwritten values
+ (default: {True})
+ fail_on_err {bool} -- Whether to fail with an exception if the installation
+ fails (default: {True})
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of the
+ executed command.
+ """
+ helm_cmd = ["upgrade", name, chart, "--namespace", namespace, "--wait"]
+ if values_file:
+ helm_cmd.extend(("-f", values_file))
+ if reuse_values:
+ helm_cmd.append("--reuse-values")
+ if set_values:
+ opt_list = [f"{k}={v}" for k, v in set_values.items()]
+ helm_cmd.extend(("--set", ",".join(opt_list)))
+ return self._exec_command(helm_cmd, fail_on_err)
+
+ def delete(self, name, namespace=None):
+ """Deletes a chart from the cluster
+
+ Arguments:
+ name {str} -- Name of the chart to delete
+
+ Keyword Arguments:
+ namespace {str} -- Kubernetes namespace (default: {None})
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of
+ the executed command.
+ """
+
+ if name not in self.list(namespace):
+ return None
+
+ helm_cmd = ["delete", name]
+ if namespace:
+ helm_cmd.extend(("--namespace", namespace))
+ return self._exec_command(helm_cmd)
+
+ def delete_all(self, namespace=None, exceptions=None):
+ """Deletes all charts on the cluster
+
+ Keyword Arguments:
+ namespace {str} -- Kubernetes namespace (default: {None})
+ exceptions {list} -- List of chart names not to delete (default: {None})
+ """
+
+ charts = self.list(namespace)
+ for chart in charts:
+ if exceptions and chart["name"] in exceptions:
+ continue
+ self.delete(chart["name"], namespace)
+
+ def is_installed(self, namespace, chart):
+ """Checks if a chart is installed in the cluster
+
+ Keyword Arguments:
+ namespace {str} -- Kubernetes namespace
+ chart {str} -- Name of the chart
+
+ Returns:
+ bool -- Whether the chart is installed
+ """
+
+ for installed_chart in self.list(namespace):
+ if installed_chart["name"] == chart:
+ return True
+
+ return False
diff --git a/charts/k8s-gerrit/tests/fixtures/helm/gerrit.py b/charts/k8s-gerrit/tests/fixtures/helm/gerrit.py
new file mode 100644
index 0000000..ec7a7c1
--- /dev/null
+++ b/charts/k8s-gerrit/tests/fixtures/helm/gerrit.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+from copy import deepcopy
+from pathlib import Path
+
+import pytest
+import yaml
+
+import pygit2 as git
+import chromedriver_autoinstaller
+from kubernetes import client
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+
+from .abstract_deployment import AbstractDeployment
+
+
+class TimeOutException(Exception):
+ """Exception to be raised, if some action does not finish in time."""
+
+
+def dict_to_git_config(config_dict):
+ config = ""
+ for section, options in config_dict.items():
+ config += f"[{section}]\n"
+ for key, value in options.items():
+ if isinstance(value, bool):
+ value = "true" if value else "false"
+ elif isinstance(value, list):
+ for opt in value:
+ config += f" {key} = {opt}\n"
+ continue
+ config += f" {key} = {value}\n"
+ return config
+
+
+GERRIT_STARTUP_TIMEOUT = 240
+
+DEFAULT_GERRIT_CONFIG = {
+ "auth": {
+ "type": "LDAP",
+ },
+ "container": {
+ "user": "gerrit",
+ "javaHome": "/usr/lib/jvm/java-11-openjdk",
+ "javaOptions": [
+ "-Djavax.net.ssl.trustStore=/var/gerrit/etc/keystore",
+ "-Xms200m",
+ "-Xmx4g",
+ ],
+ },
+ "gerrit": {
+ "basePath": "git",
+ "canonicalWebUrl": "http://example.com/",
+ "serverId": "gerrit-1",
+ },
+ "httpd": {
+ "listenUrl": "proxy-https://*:8080/",
+ "requestLog": True,
+ "gracefulStopTimeout": "1m",
+ },
+ "index": {"type": "LUCENE", "onlineUpgrade": False},
+ "ldap": {
+ "server": "ldap://openldap.openldap.svc.cluster.local:1389",
+ "accountbase": "dc=example,dc=org",
+ "username": "cn=admin,dc=example,dc=org",
+ },
+ "sshd": {"listenAddress": "off"},
+}
+
+DEFAULT_VALUES = {
+ "gitRepositoryStorage": {"externalPVC": {"use": True, "name": "repo-storage"}},
+ "gitGC": {"logging": {"persistence": {"enabled": False}}},
+ "gerrit": {
+ "etc": {"config": {"gerrit.config": dict_to_git_config(DEFAULT_GERRIT_CONFIG)}}
+ },
+}
+
+
+# pylint: disable=R0902
+class GerritDeployment(AbstractDeployment):
+ def __init__(
+ self,
+ tmp_dir,
+ cluster,
+ storageclass,
+ container_registry,
+ container_org,
+ container_version,
+ ingress_url,
+ ldap_admin_credentials,
+ ldap_credentials,
+ ):
+ super().__init__(tmp_dir)
+ self.cluster = cluster
+ self.storageclass = storageclass
+ self.ldap_credentials = ldap_credentials
+
+ self.chart_name = "gerrit-" + self.namespace
+ self.chart_path = os.path.join(
+ # pylint: disable=E1101
+ Path(git.discover_repository(os.path.realpath(__file__))).parent.absolute(),
+ "helm-charts",
+ "gerrit",
+ )
+
+ self.gerrit_config = deepcopy(DEFAULT_GERRIT_CONFIG)
+ self.chart_opts = deepcopy(DEFAULT_VALUES)
+
+ self._configure_container_images(
+ container_registry, container_org, container_version
+ )
+ self.hostname = f"{self.namespace}.{ingress_url}"
+ self._configure_ingress()
+ self.set_gerrit_config_value(
+ "gerrit", "canonicalWebUrl", f"http://{self.hostname}"
+ )
+ # pylint: disable=W1401
+ self.set_helm_value(
+ "gerrit.etc.secret.secure\.config",
+ dict_to_git_config({"ldap": {"password": ldap_admin_credentials[1]}}),
+ )
+
+ def install(self, wait=True):
+ if self.cluster.helm.is_installed(self.namespace, self.chart_name):
+ self.update()
+ return
+
+ with open(self.values_file, "w", encoding="UTF-8") as f:
+ yaml.dump(self.chart_opts, f)
+
+ self.cluster.create_namespace(self.namespace)
+ self._create_pvc()
+
+ self.cluster.helm.install(
+ self.chart_path,
+ self.chart_name,
+ values_file=self.values_file,
+ fail_on_err=True,
+ namespace=self.namespace,
+ wait=wait,
+ )
+
+ def create_admin_account(self):
+ self.wait_until_ready()
+ chromedriver_autoinstaller.install()
+ options = webdriver.ChromeOptions()
+ options.add_argument("--headless")
+ options.add_argument("--no-sandbox")
+ options.add_argument("--ignore-certificate-errors")
+ options.set_capability("acceptInsecureCerts", True)
+ driver = webdriver.Chrome(
+ options=options,
+ )
+ driver.get(f"http://{self.hostname}/login")
+ user_input = driver.find_element(By.ID, "f_user")
+ user_input.send_keys("gerrit-admin")
+
+ pwd_input = driver.find_element(By.ID, "f_pass")
+ pwd_input.send_keys(self.ldap_credentials["gerrit-admin"])
+
+ submit_btn = driver.find_element(By.ID, "b_signin")
+ submit_btn.click()
+
+ driver.close()
+
+ def update(self):
+ with open(self.values_file, "w", encoding="UTF-8") as f:
+ yaml.dump(self.chart_opts, f)
+
+ self.cluster.helm.upgrade(
+ self.chart_path,
+ self.chart_name,
+ values_file=self.values_file,
+ fail_on_err=True,
+ namespace=self.namespace,
+ )
+
+ def wait_until_ready(self):
+ pod_labels = f"app=gerrit,release={self.chart_name}"
+ finished_in_time = self._wait_for_pod_readiness(
+ pod_labels, timeout=GERRIT_STARTUP_TIMEOUT
+ )
+
+ if not finished_in_time:
+ raise TimeOutException(
+ f"Gerrit pod was not ready in time ({GERRIT_STARTUP_TIMEOUT} s)."
+ )
+
+ def uninstall(self):
+ self.cluster.helm.delete(self.chart_name, namespace=self.namespace)
+ self.cluster.delete_namespace(self.namespace)
+
+ def set_gerrit_config_value(self, section, key, value):
+ if isinstance(self.gerrit_config[section][key], list):
+ self.gerrit_config[section][key].append(value)
+ else:
+ self.gerrit_config[section][key] = value
+ # pylint: disable=W1401
+ self.set_helm_value(
+ "gerrit.etc.config.gerrit\.config", dict_to_git_config(self.gerrit_config)
+ )
+
+ def _set_values_file(self):
+ return os.path.join(self.tmp_dir, "values.yaml")
+
+ def _configure_container_images(
+ self, container_registry, container_org, container_version
+ ):
+ self.set_helm_value("images.registry.name", container_registry)
+ self.set_helm_value("gitGC.image", f"{container_org}/git-gc")
+ self.set_helm_value("gerrit.images.gerritInit", f"{container_org}/gerrit-init")
+ self.set_helm_value("gerrit.images.gerrit", f"{container_org}/gerrit")
+ self.set_helm_value("images.version", container_version)
+
+ def _configure_ingress(self):
+ self.set_helm_value("ingress.enabled", True)
+ self.set_helm_value("ingress.host", self.hostname)
+
+ def _create_pvc(self):
+ core_v1 = client.CoreV1Api()
+ core_v1.create_namespaced_persistent_volume_claim(
+ self.namespace,
+ body=client.V1PersistentVolumeClaim(
+ kind="PersistentVolumeClaim",
+ api_version="v1",
+ metadata=client.V1ObjectMeta(name="repo-storage"),
+ spec=client.V1PersistentVolumeClaimSpec(
+ access_modes=["ReadWriteMany"],
+ storage_class_name=self.storageclass,
+ resources=client.V1ResourceRequirements(
+ requests={"storage": "1Gi"}
+ ),
+ ),
+ ),
+ )
+
+
+@pytest.fixture(scope="class")
+def gerrit_deployment(
+ request, tmp_path_factory, test_cluster, ldap_admin_credentials, ldap_credentials
+):
+ deployment = GerritDeployment(
+ tmp_path_factory.mktemp("gerrit_deployment"),
+ test_cluster,
+ request.config.getoption("--rwm-storageclass").lower(),
+ request.config.getoption("--registry"),
+ request.config.getoption("--org"),
+ request.config.getoption("--tag"),
+ request.config.getoption("--ingress-url"),
+ ldap_admin_credentials,
+ ldap_credentials,
+ )
+
+ yield deployment
+
+ deployment.uninstall()
+
+
+@pytest.fixture(scope="class")
+def default_gerrit_deployment(gerrit_deployment):
+ gerrit_deployment.install()
+ gerrit_deployment.create_admin_account()
+
+ yield gerrit_deployment
diff --git a/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_plugins.py b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_plugins.py
new file mode 100644
index 0000000..62981ac
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_plugins.py
@@ -0,0 +1,343 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import json
+import os.path
+import time
+
+import pytest
+import requests
+
+from kubernetes import client
+from kubernetes.stream import stream
+
+PLUGINS = ["avatars-gravatar", "readonly"]
+LIBS = ["global-refdb"]
+GERRIT_VERSION = "3.8"
+
+
+@pytest.fixture(scope="module")
+def plugin_list():
+ plugin_list = []
+ for plugin in PLUGINS:
+ url = (
+ f"https://gerrit-ci.gerritforge.com/view/Plugins-stable-{GERRIT_VERSION}/"
+ f"job/plugin-{plugin}-bazel-master-stable-{GERRIT_VERSION}/lastSuccessfulBuild/"
+ f"artifact/bazel-bin/plugins/{plugin}/{plugin}.jar"
+ )
+ jar = requests.get(url, verify=False).content
+ plugin_list.append(
+ {"name": plugin, "url": url, "sha1": hashlib.sha1(jar).hexdigest()}
+ )
+ return plugin_list
+
+
+@pytest.fixture(scope="module")
+def lib_list():
+ lib_list = []
+ for lib in LIBS:
+ url = (
+ f"https://gerrit-ci.gerritforge.com/view/Plugins-stable-{GERRIT_VERSION}/"
+ f"job/module-{lib}-bazel-stable-{GERRIT_VERSION}/lastSuccessfulBuild/"
+ f"artifact/bazel-bin/plugins/{lib}/{lib}.jar"
+ )
+ jar = requests.get(url, verify=False).content
+ lib_list.append(
+ {"name": lib, "url": url, "sha1": hashlib.sha1(jar).hexdigest()}
+ )
+ return lib_list
+
+
+@pytest.fixture(
+ scope="class",
+ params=[
+ [{"name": "replication"}],
+ [{"name": "replication"}, {"name": "download-commands"}],
+ ],
+ ids=["single-packaged-plugin", "multiple-packaged-plugins"],
+)
+def gerrit_deployment_with_packaged_plugins(request, gerrit_deployment):
+ gerrit_deployment.set_helm_value("gerrit.pluginManagement.plugins", request.param)
+ gerrit_deployment.install()
+ gerrit_deployment.create_admin_account()
+
+ yield gerrit_deployment, request.param
+
+
+@pytest.fixture(
+ scope="class", params=[1, 2], ids=["single-other-plugin", "multiple-other-plugins"]
+)
+def gerrit_deployment_with_other_plugins(
+ request,
+ plugin_list,
+ gerrit_deployment,
+):
+ selected_plugins = plugin_list[: request.param]
+
+ gerrit_deployment.set_helm_value(
+ "gerrit.pluginManagement.plugins", selected_plugins
+ )
+
+ gerrit_deployment.install()
+ gerrit_deployment.create_admin_account()
+
+ yield gerrit_deployment, selected_plugins
+
+
+@pytest.fixture(scope="class")
+def gerrit_deployment_with_libs(
+ request,
+ lib_list,
+ gerrit_deployment,
+):
+ gerrit_deployment.set_helm_value("gerrit.pluginManagement.libs", lib_list)
+
+ gerrit_deployment.install()
+ gerrit_deployment.create_admin_account()
+
+ yield gerrit_deployment, lib_list
+
+
+@pytest.fixture(scope="class")
+def gerrit_deployment_with_other_plugin_wrong_sha(plugin_list, gerrit_deployment):
+ plugin = plugin_list[0]
+ plugin["sha1"] = "notAValidSha"
+ gerrit_deployment.set_helm_value("gerrit.pluginManagement.plugins", [plugin])
+
+ gerrit_deployment.install(wait=False)
+
+ yield gerrit_deployment
+
+
+def get_gerrit_plugin_list(gerrit_url, user="admin", password="secret"):
+ list_plugins_url = f"{gerrit_url}/a/plugins/?all"
+ response = requests.get(list_plugins_url, auth=(user, password))
+ if response.status_code != 200:
+ return None
+ body = response.text
+ return json.loads(body[body.index("\n") + 1 :])  # drop Gerrit's XSSI-protection prefix line
+
+
+def get_gerrit_lib_list(gerrit_deployment):
+ response = (
+ stream(
+ client.CoreV1Api().connect_get_namespaced_pod_exec,
+ gerrit_deployment.chart_name + "-gerrit-stateful-set-0",
+ gerrit_deployment.namespace,
+ command=["/bin/ash", "-c", "ls /var/gerrit/lib"],
+ stdout=True,
+ )
+ .strip()
+ .split()
+ )
+ return [os.path.splitext(r)[0] for r in response]
+
+
+@pytest.mark.slow
+@pytest.mark.incremental
+@pytest.mark.integration
+@pytest.mark.kubernetes
+class TestgerritChartPackagedPluginInstall:
+ def _assert_installed_plugins(self, expected_plugins, installed_plugins):
+ for plugin in expected_plugins:
+ plugin_name = plugin["name"]
+ assert plugin_name in installed_plugins
+ assert installed_plugins[plugin_name]["filename"] == f"{plugin_name}.jar"
+
+ @pytest.mark.timeout(300)
+ def test_install_packaged_plugins(
+ self, request, gerrit_deployment_with_packaged_plugins, ldap_credentials
+ ):
+ gerrit_deployment, expected_plugins = gerrit_deployment_with_packaged_plugins
+ response = None
+ while not response:
+ try:
+ response = get_gerrit_plugin_list(
+ f"http://{gerrit_deployment.hostname}",
+ "gerrit-admin",
+ ldap_credentials["gerrit-admin"],
+ )
+ except requests.exceptions.ConnectionError:
+ time.sleep(1)
+
+ self._assert_installed_plugins(expected_plugins, response)
+
+ @pytest.mark.timeout(300)
+ def test_install_packaged_plugins_are_removed_with_update(
+ self,
+ request,
+ test_cluster,
+ gerrit_deployment_with_packaged_plugins,
+ ldap_credentials,
+ ):
+ gerrit_deployment, expected_plugins = gerrit_deployment_with_packaged_plugins
+ removed_plugin = expected_plugins.pop()
+
+ gerrit_deployment.set_helm_value(
+ "gerrit.pluginManagement.plugins", expected_plugins
+ )
+ gerrit_deployment.update()
+
+ response = None
+ while True:
+ try:
+ response = get_gerrit_plugin_list(
+ f"http://{gerrit_deployment.hostname}",
+ "gerrit-admin",
+ ldap_credentials["gerrit-admin"],
+ )
+ if response is not None and removed_plugin["name"] not in response:
+ break
+ except requests.exceptions.ConnectionError:
+ time.sleep(1)
+
+ assert removed_plugin["name"] not in response
+ self._assert_installed_plugins(expected_plugins, response)
+
+
+@pytest.mark.slow
+@pytest.mark.incremental
+@pytest.mark.integration
+@pytest.mark.kubernetes
+class TestGerritChartOtherPluginInstall:
+ def _assert_installed_plugins(self, expected_plugins, installed_plugins):
+ for plugin in expected_plugins:
+ assert plugin["name"] in installed_plugins
+ assert (
+ installed_plugins[plugin["name"]]["filename"] == f"{plugin['name']}.jar"
+ )
+
+ @pytest.mark.timeout(300)
+ def test_install_other_plugins(
+ self, gerrit_deployment_with_other_plugins, ldap_credentials
+ ):
+ gerrit_deployment, expected_plugins = gerrit_deployment_with_other_plugins
+ response = None
+ while not response:
+ try:
+ response = get_gerrit_plugin_list(
+ f"http://{gerrit_deployment.hostname}",
+ "gerrit-admin",
+ ldap_credentials["gerrit-admin"],
+ )
+ except requests.exceptions.ConnectionError:
+ continue
+ self._assert_installed_plugins(expected_plugins, response)
+
+ @pytest.mark.timeout(300)
+ def test_install_other_plugins_are_removed_with_update(
+ self, gerrit_deployment_with_other_plugins, ldap_credentials
+ ):
+ gerrit_deployment, installed_plugins = gerrit_deployment_with_other_plugins
+ removed_plugin = installed_plugins.pop()
+ gerrit_deployment.set_helm_value(
+ "gerrit.pluginManagement.plugins", installed_plugins
+ )
+ gerrit_deployment.update()
+
+ response = None
+ while True:
+ try:
+ response = get_gerrit_plugin_list(
+ f"http://{gerrit_deployment.hostname}",
+ "gerrit-admin",
+ ldap_credentials["gerrit-admin"],
+ )
+ if response is not None and removed_plugin["name"] not in response:
+ break
+ except requests.exceptions.ConnectionError:
+ time.sleep(1)
+
+ assert removed_plugin["name"] not in response
+ self._assert_installed_plugins(installed_plugins, response)
+
+
+@pytest.mark.slow
+@pytest.mark.incremental
+@pytest.mark.integration
+@pytest.mark.kubernetes
+class TestGerritChartLibModuleInstall:
+ def _assert_installed_libs(self, expected_libs, installed_libs):
+ for lib in expected_libs:
+ assert lib["name"] in installed_libs
+
+ @pytest.mark.timeout(300)
+ def test_install_libs(self, gerrit_deployment_with_libs):
+ gerrit_deployment, expected_libs = gerrit_deployment_with_libs
+ response = get_gerrit_lib_list(gerrit_deployment)
+ self._assert_installed_libs(expected_libs, response)
+
+ @pytest.mark.timeout(300)
+ def test_install_other_plugins_are_removed_with_update(
+ self, gerrit_deployment_with_libs
+ ):
+ gerrit_deployment, installed_libs = gerrit_deployment_with_libs
+ removed_lib = installed_libs.pop()
+ gerrit_deployment.set_helm_value("gerrit.pluginManagement.libs", installed_libs)
+ gerrit_deployment.update()
+
+ response = None
+ while True:
+ try:
+ response = get_gerrit_lib_list(gerrit_deployment)
+ if response is not None and removed_lib["name"] not in response:
+ break
+ except requests.exceptions.ConnectionError:
+ time.sleep(1)
+
+ assert removed_lib["name"] not in response
+ self._assert_installed_libs(installed_libs, response)
+
+
+@pytest.mark.integration
+@pytest.mark.kubernetes
+@pytest.mark.timeout(180)
+def test_install_other_plugins_fails_wrong_sha(
+ gerrit_deployment_with_other_plugin_wrong_sha,
+):
+ pod_labels = f"app.kubernetes.io/component=gerrit,release={gerrit_deployment_with_other_plugin_wrong_sha.chart_name}"
+ core_v1 = client.CoreV1Api()
+ pod_name = ""
+ while not pod_name:
+ pod_list = core_v1.list_namespaced_pod(
+ namespace=gerrit_deployment_with_other_plugin_wrong_sha.namespace,
+ watch=False,
+ label_selector=pod_labels,
+ )
+ if len(pod_list.items) > 1:
+ raise RuntimeError("Too many gerrit pods with the same release name.")
+ elif len(pod_list.items) == 1:
+ pod_name = pod_list.items[0].metadata.name
+
+ current_status = None
+ while not current_status:
+ pod = core_v1.read_namespaced_pod_status(
+ pod_name, gerrit_deployment_with_other_plugin_wrong_sha.namespace
+ )
+ if not pod.status.init_container_statuses:
+ time.sleep(1)
+ continue
+ for init_container_status in pod.status.init_container_statuses:
+ if (
+ init_container_status.name == "gerrit-init"
+ and init_container_status.last_state.terminated
+ ):
+ current_status = init_container_status
+ assert current_status.last_state.terminated.exit_code > 0
+ return
+
+ assert current_status.last_state.terminated.exit_code > 0
diff --git a/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_setup.py b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_setup.py
new file mode 100644
index 0000000..306d41c
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_setup.py
@@ -0,0 +1,29 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.integration
+@pytest.mark.kubernetes
+def test_deployment(test_cluster, default_gerrit_deployment):
+ installed_charts = test_cluster.helm.list(default_gerrit_deployment.namespace)
+ gerrit_chart = None
+ for chart in installed_charts:
+ if chart["name"].startswith("gerrit"):
+ gerrit_chart = chart
+ assert gerrit_chart is not None
+ assert gerrit_chart["status"].lower() == "deployed"
diff --git a/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_smoke_test.py b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_smoke_test.py
new file mode 100644
index 0000000..b3ee757
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_smoke_test.py
@@ -0,0 +1,110 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import re
+import shutil
+
+from pathlib import Path
+
+import pygit2 as git
+import pytest
+import requests
+
+import utils
+
+
+@pytest.fixture(scope="module")
+def admin_creds(request):
+ user = request.config.getoption("--gerrit-user")
+ pwd = request.config.getoption("--gerrit-pwd")
+ return user, pwd
+
+
+@pytest.fixture(scope="class")
+def tmp_test_repo(request, tmp_path_factory):
+ tmp_dir = tmp_path_factory.mktemp("gerrit_chart_clone_test")
+ yield tmp_dir
+ shutil.rmtree(tmp_dir)
+
+
+@pytest.fixture(scope="class")
+def random_repo_name():
+ return utils.create_random_string(16)
+
+
+@pytest.mark.smoke
+def test_ui_connection(request):
+ response = requests.get(request.config.getoption("--ingress-url"))
+ assert response.status_code == requests.codes["OK"]
+ assert re.search(r'content="Gerrit Code Review"', response.text)
+
+
+@pytest.mark.smoke
+@pytest.mark.incremental
+class TestGerritRestGitCalls:
+ def _is_delete_project_plugin_enabled(self, gerrit_url, user, pwd):
+ url = f"{gerrit_url}/a/plugins/delete-project/gerrit~status"
+ response = requests.get(url, auth=(user, pwd))
+ return response.status_code == requests.codes["OK"]
+
+ def test_create_project_rest(self, request, random_repo_name, admin_creds):
+ ingress_url = request.config.getoption("--ingress-url")
+ create_project_url = f"{ingress_url}/a/projects/{random_repo_name}"
+ response = requests.put(create_project_url, auth=admin_creds)
+ assert response.status_code == requests.codes["CREATED"]
+
+ def test_cloning_project(
+ self, request, tmp_test_repo, random_repo_name, admin_creds
+ ):
+ repo_url = f"{request.config.getoption('--ingress-url')}/{random_repo_name}.git"
+ repo_url = repo_url.replace("//", f"//{admin_creds[0]}:{admin_creds[1]}@")
+ repo = git.clone_repository(repo_url, tmp_test_repo)
+ assert repo.path == os.path.join(tmp_test_repo, ".git/")
+
+ def test_push_commit(self, tmp_test_repo):
+ repo = git.Repository(tmp_test_repo)
+ file_name = os.path.join(tmp_test_repo, "test.txt")
+ Path(file_name).touch()
+ repo.index.add("test.txt")
+ repo.index.write()
+ # pylint: disable=E1101
+ author = git.Signature("Gerrit Review", "gerrit@review.com")
+ committer = git.Signature("Gerrit Review", "gerrit@review.com")
+ message = "Initial commit"
+ tree = repo.index.write_tree()
+ repo.create_commit("HEAD", author, committer, message, tree, [])
+
+ origin = repo.remotes["origin"]
+ origin.push(["refs/heads/master:refs/heads/master"])
+
+ remote_refs = origin.ls_remotes()
+ assert remote_refs[0]["name"] == repo.revparse_single("HEAD").hex
+
+ def test_delete_project_rest(self, request, random_repo_name, admin_creds):
+ ingress_url = request.config.getoption("--ingress-url")
+ if not self._is_delete_project_plugin_enabled(
+ ingress_url, admin_creds[0], admin_creds[1]
+ ):
+ pytest.skip(
+ "Delete-project plugin not installed."
+ + f"The test project ({random_repo_name}) has to be deleted manually."
+ )
+ project_url = (
+ f"{ingress_url}/a/projects/{random_repo_name}/delete-project~delete"
+ )
+ response = requests.post(project_url, auth=admin_creds)
+ assert response.status_code == requests.codes["NO_CONTENT"]
diff --git a/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_ssl.py b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_ssl.py
new file mode 100644
index 0000000..0eee0f4
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_ssl.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+import pygit2 as git
+import pytest
+import requests
+
+import git_callbacks
+import mock_ssl
+
+
+@pytest.fixture(scope="module")
+def cert_dir(tmp_path_factory):
+ return tmp_path_factory.mktemp("gerrit-cert")
+
+
+def _create_ssl_certificate(url, cert_dir):
+ keypair = mock_ssl.MockSSLKeyPair("*." + url.split(".", 1)[1], url)
+ with open(os.path.join(cert_dir, "server.crt"), "wb") as f:
+ f.write(keypair.get_cert())
+ with open(os.path.join(cert_dir, "server.key"), "wb") as f:
+ f.write(keypair.get_key())
+ return keypair
+
+
+@pytest.fixture(scope="class")
+def gerrit_deployment_with_ssl(cert_dir, gerrit_deployment):
+ ssl_certificate = _create_ssl_certificate(gerrit_deployment.hostname, cert_dir)
+ gerrit_deployment.set_helm_value("ingress.tls.enabled", True)
+ gerrit_deployment.set_helm_value(
+ "ingress.tls.cert", ssl_certificate.get_cert().decode()
+ )
+ gerrit_deployment.set_helm_value(
+ "ingress.tls.key", ssl_certificate.get_key().decode()
+ )
+ gerrit_deployment.set_gerrit_config_value(
+ "httpd", "listenUrl", "proxy-https://*:8080/"
+ )
+ gerrit_deployment.set_gerrit_config_value(
+ "gerrit",
+ "canonicalWebUrl",
+ f"https://{gerrit_deployment.hostname}",
+ )
+
+ gerrit_deployment.install()
+ gerrit_deployment.create_admin_account()
+
+ yield gerrit_deployment
+
+
+@pytest.mark.incremental
+@pytest.mark.integration
+@pytest.mark.kubernetes
+@pytest.mark.slow
+class TestgerritChartSetup:
+ # pylint: disable=W0613
+ def test_create_project_rest(
+ self, cert_dir, gerrit_deployment_with_ssl, ldap_credentials
+ ):
+ create_project_url = (
+ f"https://{gerrit_deployment_with_ssl.hostname}/a/projects/test"
+ )
+ response = requests.put(
+ create_project_url,
+ auth=("gerrit-admin", ldap_credentials["gerrit-admin"]),
+ verify=os.path.join(cert_dir, "server.crt"),
+ )
+ assert response.status_code == 201
+
+ def test_cloning_project(self, tmp_path_factory, gerrit_deployment_with_ssl):
+ clone_dest = tmp_path_factory.mktemp("gerrit_chart_clone_test")
+ repo_url = f"https://{gerrit_deployment_with_ssl.hostname}/test.git"
+ repo = git.clone_repository(
+ repo_url, clone_dest, callbacks=git_callbacks.TestRemoteCallbacks()
+ )
+ assert repo.path == os.path.join(clone_dest, ".git/")
diff --git a/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_usage.py b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_usage.py
new file mode 100644
index 0000000..f63d209
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helm-charts/gerrit/test_chart_gerrit_usage.py
@@ -0,0 +1,51 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+import pygit2 as git
+import pytest
+import requests
+
+
+@pytest.mark.slow
+@pytest.mark.incremental
+@pytest.mark.integration
+@pytest.mark.kubernetes
+class TestGerritChartSetup:
+ @pytest.mark.timeout(240)
+ def test_create_project_rest(self, default_gerrit_deployment, ldap_credentials):
+ create_project_url = (
+ f"http://{default_gerrit_deployment.hostname}/a/projects/test"
+ )
+ response = None
+
+ while not response:
+ try:
+ response = requests.put(
+ create_project_url,
+ auth=("gerrit-admin", ldap_credentials["gerrit-admin"]),
+ )
+ except requests.exceptions.ConnectionError:
+ break
+
+ assert response.status_code == 201
+
+ def test_cloning_project(self, tmp_path_factory, default_gerrit_deployment):
+ clone_dest = tmp_path_factory.mktemp("gerrit_chart_clone_test")
+ repo_url = f"http://{default_gerrit_deployment.hostname}/test.git"
+ repo = git.clone_repository(repo_url, clone_dest)
+ assert repo.path == os.path.join(clone_dest, ".git/")
diff --git a/charts/k8s-gerrit/tests/helpers/git_callbacks.py b/charts/k8s-gerrit/tests/helpers/git_callbacks.py
new file mode 100644
index 0000000..3922a24
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helpers/git_callbacks.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pygit2 as git
+
+
+class TestRemoteCallbacks(git.RemoteCallbacks):
+ def certificate_check(self, certificate, valid, host):
+ return True
diff --git a/charts/k8s-gerrit/tests/helpers/mock_ssl.py b/charts/k8s-gerrit/tests/helpers/mock_ssl.py
new file mode 100644
index 0000000..46d766c
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helpers/mock_ssl.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from OpenSSL import crypto
+
+
+class MockSSLKeyPair:
+ def __init__(self, common_name, subject_alt_name):
+ self.common_name = common_name
+ self.subject_alt_name = subject_alt_name
+ self.cert = None
+ self.key = None
+
+ self._create_keypair()
+
+ def _create_keypair(self):
+ self.key = crypto.PKey()
+ self.key.generate_key(crypto.TYPE_RSA, 2048)
+
+ self.cert = crypto.X509()
+ self.cert.set_version(2)
+ self.cert.get_subject().O = "Gerrit"
+ self.cert.get_subject().CN = self.common_name
+ san = f"DNS:{self.subject_alt_name}"
+ self.cert.add_extensions(
+ [crypto.X509Extension(b"subjectAltName", False, san.encode())]
+ )
+ self.cert.gmtime_adj_notBefore(0)
+ self.cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
+ self.cert.set_issuer(self.cert.get_subject())
+ self.cert.set_pubkey(self.key)
+ self.cert.sign(self.key, "sha256")
+
+ def get_key(self):
+ return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.key)
+
+ def get_cert(self):
+ return crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert)
diff --git a/charts/k8s-gerrit/tests/helpers/utils.py b/charts/k8s-gerrit/tests/helpers/utils.py
new file mode 100644
index 0000000..804217e
--- /dev/null
+++ b/charts/k8s-gerrit/tests/helpers/utils.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+import string
+
+
+def check_if_ancestor_image_is_inherited(image, ancestor):
+ """Helper function that looks for a given ancestor image in the layers of a
+ provided image. It can be used to check, whether an image uses the expected
+ FROM-statement
+
+ Arguments:
+ image {docker.images.Image} -- Docker image object to be checked
+ ancestor {str} -- Complete name of the expected ancestor image
+
+ Returns:
+ boolean -- True, if ancestor is inherited by image
+ """
+
+ contains_tag = False
+ for layer in image.history():
+ contains_tag = layer["Tags"] is not None and ancestor in layer["Tags"]
+ if contains_tag:
+ break
+ return contains_tag
+
+
+def create_random_string(length=8):
+ return "".join([random.choice(string.ascii_letters) for n in range(length)]).lower()
diff --git a/charts/launcher/.helmignore b/charts/launcher/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/launcher/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/launcher/Chart.yaml b/charts/launcher/Chart.yaml
new file mode 100644
index 0000000..494c33b
--- /dev/null
+++ b/charts/launcher/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: launcher
+description: A Helm chart for PCloud Launcher
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/launcher/templates/install.yaml b/charts/launcher/templates/install.yaml
new file mode 100644
index 0000000..c2b3330
--- /dev/null
+++ b/charts/launcher/templates/install.yaml
@@ -0,0 +1,60 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: launcher
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: launcher
+ ports:
+ - name: {{ .Values.portName }}
+ protocol: TCP
+ port: 80
+ targetPort: http
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ssh-key
+type: Opaque
+data:
+ private: {{ .Values.sshPrivateKey }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: launcher
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: launcher
+ template:
+ metadata:
+ labels:
+ app: launcher
+ spec:
+ volumes:
+ - name: ssh-key
+ secret:
+ secretName: ssh-key
+ containers:
+ - name: launcher
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - pcloud-installer
+ - launcher
+ - --port=8080
+ - --logout-url={{ .Values.logoutUrl }}
+ - --ssh-key=/pcloud/ssh-key/private
+ - --repo-addr={{ .Values.repoAddr }}
+ volumeMounts:
+ - name: ssh-key
+ readOnly: true
+ mountPath: /pcloud/ssh-key
diff --git a/charts/launcher/values.yaml b/charts/launcher/values.yaml
new file mode 100644
index 0000000..fedfed5
--- /dev/null
+++ b/charts/launcher/values.yaml
@@ -0,0 +1,9 @@
+image:
+ repository: giolekva/launcher
+ tag: latest
+ pullPolicy: Always
+portName: http
+appRepoAddr: ""
+logoutUrl: logout.example.com
+repoAddr: 192.168.0.11
+sshPrivateKey: key
diff --git a/charts/longhorn-1.4.1/.helmignore b/charts/longhorn-1.4.1/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/longhorn-1.4.1/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/longhorn-1.4.1/Chart.yaml b/charts/longhorn-1.4.1/Chart.yaml
new file mode 100644
index 0000000..d9dd5a1
--- /dev/null
+++ b/charts/longhorn-1.4.1/Chart.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+appVersion: v1.4.1
+description: Longhorn is a distributed block storage system for Kubernetes.
+home: https://github.com/longhorn/longhorn
+icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
+keywords:
+- longhorn
+- storage
+- distributed
+- block
+- device
+- iscsi
+- nfs
+kubeVersion: '>=1.21.0-0'
+maintainers:
+- email: maintainers@longhorn.io
+ name: Longhorn maintainers
+name: longhorn
+sources:
+- https://github.com/longhorn/longhorn
+- https://github.com/longhorn/longhorn-engine
+- https://github.com/longhorn/longhorn-instance-manager
+- https://github.com/longhorn/longhorn-share-manager
+- https://github.com/longhorn/longhorn-manager
+- https://github.com/longhorn/longhorn-ui
+- https://github.com/longhorn/longhorn-tests
+- https://github.com/longhorn/backing-image-manager
+version: 1.4.1
diff --git a/charts/longhorn-1.4.1/README.md b/charts/longhorn-1.4.1/README.md
new file mode 100644
index 0000000..012c058
--- /dev/null
+++ b/charts/longhorn-1.4.1/README.md
@@ -0,0 +1,78 @@
+# Longhorn Chart
+
+> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
+
+> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
+
+## Source Code
+
+Longhorn is 100% open source software. Project source code is spread across a number of repos:
+
+1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
+2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
+3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
+4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
+5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
+6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
+
+## Prerequisites
+
+1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
+2. Kubernetes >= v1.21
+3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
+4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image since it already contains `open-iscsi`.
+
+## Upgrading to Kubernetes v1.25+
+
+Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
+
+As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
+
+> **Note:**
+> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
+>
+> If your charts get stuck in this state, you may have to clean up your Helm release secrets.
+Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
+
+As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
+
+## Installation
+1. Add Longhorn chart repository.
+```
+helm repo add longhorn https://charts.longhorn.io
+```
+
+2. Update local Longhorn chart information from chart repository.
+```
+helm repo update
+```
+
+3. Install Longhorn chart.
+- With Helm 2, the following command will create the `longhorn-system` namespace and install the Longhorn chart together.
+```
+helm install longhorn/longhorn --name longhorn --namespace longhorn-system
+```
+- With Helm 3, the following commands will create the `longhorn-system` namespace first, then install the Longhorn chart.
+
+```
+kubectl create namespace longhorn-system
+helm install longhorn longhorn/longhorn --namespace longhorn-system
+```
+
+## Uninstallation
+
+With Helm 2 to uninstall Longhorn.
+```
+kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
+helm delete longhorn --purge
+```
+
+With Helm 3 to uninstall Longhorn.
+```
+kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
+helm uninstall longhorn -n longhorn-system
+kubectl delete namespace longhorn-system
+```
+
+---
+Please see [link](https://github.com/longhorn/longhorn) for more information.
diff --git a/charts/longhorn-1.4.1/app-readme.md b/charts/longhorn-1.4.1/app-readme.md
new file mode 100644
index 0000000..cb23135
--- /dev/null
+++ b/charts/longhorn-1.4.1/app-readme.md
@@ -0,0 +1,11 @@
+# Longhorn
+
+Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.
+
+Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!
+
+**Important**: Please install Longhorn chart in `longhorn-system` namespace only.
+
+**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
+
+[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
diff --git a/charts/longhorn-1.4.1/questions.yaml b/charts/longhorn-1.4.1/questions.yaml
new file mode 100644
index 0000000..b4ae9de
--- /dev/null
+++ b/charts/longhorn-1.4.1/questions.yaml
@@ -0,0 +1,837 @@
+categories:
+- storage
+namespace: longhorn-system
+questions:
+- variable: image.defaultImage
+ default: "true"
+ description: "Use default Longhorn images"
+ label: Use Default Images
+ type: boolean
+ show_subquestion_if: false
+ group: "Longhorn Images"
+ subquestions:
+ - variable: image.longhorn.manager.repository
+ default: longhornio/longhorn-manager
+ description: "Specify Longhorn Manager Image Repository"
+ type: string
+ label: Longhorn Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.manager.tag
+ default: v1.4.1
+ description: "Specify Longhorn Manager Image Tag"
+ type: string
+ label: Longhorn Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.engine.repository
+ default: longhornio/longhorn-engine
+ description: "Specify Longhorn Engine Image Repository"
+ type: string
+ label: Longhorn Engine Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.engine.tag
+ default: v1.4.1
+ description: "Specify Longhorn Engine Image Tag"
+ type: string
+ label: Longhorn Engine Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.ui.repository
+ default: longhornio/longhorn-ui
+ description: "Specify Longhorn UI Image Repository"
+ type: string
+ label: Longhorn UI Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.ui.tag
+ default: v1.4.1
+ description: "Specify Longhorn UI Image Tag"
+ type: string
+ label: Longhorn UI Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.instanceManager.repository
+ default: longhornio/longhorn-instance-manager
+ description: "Specify Longhorn Instance Manager Image Repository"
+ type: string
+ label: Longhorn Instance Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.instanceManager.tag
+ default: v1.4.1
+ description: "Specify Longhorn Instance Manager Image Tag"
+ type: string
+ label: Longhorn Instance Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.shareManager.repository
+ default: longhornio/longhorn-share-manager
+ description: "Specify Longhorn Share Manager Image Repository"
+ type: string
+ label: Longhorn Share Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.shareManager.tag
+ default: v1.4.1
+ description: "Specify Longhorn Share Manager Image Tag"
+ type: string
+ label: Longhorn Share Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.backingImageManager.repository
+ default: longhornio/backing-image-manager
+ description: "Specify Longhorn Backing Image Manager Image Repository"
+ type: string
+ label: Longhorn Backing Image Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.backingImageManager.tag
+ default: v1.4.1
+ description: "Specify Longhorn Backing Image Manager Image Tag"
+ type: string
+ label: Longhorn Backing Image Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.supportBundleKit.repository
+ default: longhornio/support-bundle-kit
+ description: "Specify Longhorn Support Bundle Manager Image Repository"
+ type: string
+ label: Longhorn Support Bundle Kit Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.supportBundleKit.tag
+ default: v0.0.17
+ description: "Specify Longhorn Support Bundle Manager Image Tag"
+ type: string
+ label: Longhorn Support Bundle Kit Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.csi.attacher.repository
+ default: longhornio/csi-attacher
+ description: "Specify CSI attacher image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Attacher Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.attacher.tag
+ default: v3.4.0
+ description: "Specify CSI attacher image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Attacher Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.provisioner.repository
+ default: longhornio/csi-provisioner
+ description: "Specify CSI provisioner image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Provisioner Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.provisioner.tag
+ default: v2.1.2
+ description: "Specify CSI provisioner image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Provisioner Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.nodeDriverRegistrar.repository
+ default: longhornio/csi-node-driver-registrar
+ description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Node Driver Registrar Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.nodeDriverRegistrar.tag
+ default: v2.5.0
+ description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Node Driver Registrar Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.resizer.repository
+ default: longhornio/csi-resizer
+ description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Resizer Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.resizer.tag
+ default: v1.3.0
+ description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Resizer Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.snapshotter.repository
+ default: longhornio/csi-snapshotter
+ description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Snapshotter Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.snapshotter.tag
+ default: v5.0.1
+ description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Snapshotter Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.livenessProbe.repository
+ default: longhornio/livenessprobe
+ description: "Specify CSI liveness probe image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Liveness Probe Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.livenessProbe.tag
+ default: v2.8.0
+ description: "Specify CSI liveness probe image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Liveness Probe Image Tag
+ group: "Longhorn CSI Driver Images"
+- variable: privateRegistry.registryUrl
+ label: Private registry URL
+ description: "URL of private registry. Leave blank to apply system default registry."
+ group: "Private Registry Settings"
+ type: string
+ default: ""
+- variable: privateRegistry.registrySecret
+ label: Private registry secret name
+ description: "If 'Create a new private registry secret' is true, create a Kubernetes secret with this name; otherwise, use the existing secret with this name. Use it to pull images from your private registry."
+ group: "Private Registry Settings"
+ type: string
+ default: ""
+- variable: privateRegistry.createSecret
+ default: "true"
+ description: "Create a new private registry secret"
+ type: boolean
+ group: "Private Registry Settings"
+ label: Create Secret for Private Registry Settings
+ show_subquestion_if: true
+ subquestions:
+ - variable: privateRegistry.registryUser
+ label: Private registry user
+ description: "User used to authenticate to private registry."
+ type: string
+ default: ""
+ - variable: privateRegistry.registryPasswd
+ label: Private registry password
+ description: "Password used to authenticate to private registry."
+ type: password
+ default: ""
+- variable: longhorn.default_setting
+ default: "false"
+ description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
+ label: "Customize Default Settings"
+ type: boolean
+ show_subquestion_if: true
+ group: "Longhorn Default Settings"
+ subquestions:
+ - variable: csi.kubeletRootDir
+ default:
+ description: "Specify kubelet root-dir. Leave blank to autodetect."
+ type: string
+ label: Kubelet Root Directory
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.attacherReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Attacher. By default 3."
+ label: Longhorn CSI Attacher replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.provisionerReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Provisioner. By default 3."
+ label: Longhorn CSI Provisioner replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.resizerReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Resizer. By default 3."
+ label: Longhorn CSI Resizer replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.snapshotterReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Snapshotter. By default 3."
+ label: Longhorn CSI Snapshotter replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: defaultSettings.backupTarget
+ label: Backup Target
+ description: "The endpoint used to access the backupstore. NFS and S3 are supported."
+ group: "Longhorn Default Settings"
+ type: string
+ default:
+ - variable: defaultSettings.backupTargetCredentialSecret
+ label: Backup Target Credential Secret
+ description: "The name of the Kubernetes secret associated with the backup target."
+ group: "Longhorn Default Settings"
+ type: string
+ default:
+ - variable: defaultSettings.allowRecurringJobWhileVolumeDetached
+ label: Allow Recurring Job While Volume Is Detached
+ description: 'If this setting is enabled, Longhorn will automatically attach the volume and take a snapshot/backup when it is time to do a recurring snapshot/backup.
+Note that the volume is not ready for workload during the period when the volume was automatically attached. Workload will have to wait until the recurring job finishes.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.createDefaultDiskLabeledNodes
+ label: Create Default Disk on Labeled Nodes
+ description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.defaultDataPath
+ label: Default Data Path
+ description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"'
+ group: "Longhorn Default Settings"
+ type: string
+ default: "/var/lib/longhorn/"
+ - variable: defaultSettings.defaultDataLocality
+ label: Default Data Locality
+ description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
+This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass
+The available modes are:
+- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload)
+- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.'
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "disabled"
+ - "best-effort"
+ default: "disabled"
+ - variable: defaultSettings.replicaSoftAntiAffinity
+ label: Replica Node Level Soft Anti-Affinity
+ description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.replicaAutoBalance
+ label: Replica Auto Balance
+ description: 'Enabling this setting automatically rebalances replicas when an available node is discovered.
+The available global options are:
+- **disabled**. This is the default option. No replica auto-balance will be done.
+- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
+- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.
+Longhorn also supports an individual volume setting. The setting can be specified in volume.spec.replicaAutoBalance; this overrules the global setting.
+The available volume spec options are:
+- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
+- **disabled**. This option instructs Longhorn that no replica auto-balance should be done.
+- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
+- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.'
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "disabled"
+ - "least-effort"
+ - "best-effort"
+ default: "disabled"
+ - variable: defaultSettings.storageOverProvisioningPercentage
+ label: Storage Over Provisioning Percentage
+ description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 200
+ - variable: defaultSettings.storageMinimalAvailablePercentage
+ label: Storage Minimal Available Percentage
+ description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ max: 100
+ default: 25
+ - variable: defaultSettings.upgradeChecker
+ label: Enable Upgrade Checker
+ description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.defaultReplicaCount
+ label: Default Replica Count
+ description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 1
+ max: 20
+ default: 3
+ - variable: defaultSettings.defaultLonghornStaticStorageClass
+ label: Default Longhorn Static StorageClass Name
+ description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "longhorn-static"
+ - variable: defaultSettings.backupstorePollInterval
+ label: Backupstore Poll Interval
+ description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 300
+ - variable: defaultSettings.failedBackupTTL
+ label: Failed Backup Time to Live
+ description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion.
+Failed backups will be checked and cleaned up during backupstore polling which is controlled by **Backupstore Poll Interval** setting.
+Hence this value determines the minimal wait interval of the cleanup. And the actual cleanup interval is multiple of **Backupstore Poll Interval**.
+Disabling **Backupstore Poll Interval** also means to disable failed backup auto-deletion."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1440
+ - variable: defaultSettings.restoreVolumeRecurringJobs
+ label: Restore Volume Recurring Jobs
+ description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration.
+Longhorn also supports individual volume setting. The setting can be specified on Backup page when making a backup restoration, this overrules the global setting.
+The available volume setting options are:
+- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
+- **enabled**. This option instructs Longhorn to restore recurring jobs/groups from the backup target forcibly.
+- **disabled**. This option instructs Longhorn that no restoring of recurring jobs/groups should be done."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.recurringSuccessfulJobsHistoryLimit
+ label: Cronjob Successful Jobs History Limit
+ description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1
+ - variable: defaultSettings.recurringFailedJobsHistoryLimit
+ label: Cronjob Failed Jobs History Limit
+ description: "This setting specifies how many failed backup or snapshot job histories should be retained. History will not be retained if the value is 0."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1
+ - variable: defaultSettings.supportBundleFailedHistoryLimit
+ label: SupportBundle Failed History Limit
+ description: "This setting specifies how many failed support bundles can exist in the cluster.
+The retained failed support bundles are for analysis purposes and need to be cleaned up manually.
+Set this value to **0** to have Longhorn automatically purge all failed support bundles."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1
+ - variable: defaultSettings.autoSalvage
+ label: Automatic salvage
+ description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
+ label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
+ description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
+If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume.
+**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.disableSchedulingOnCordonedNode
+ label: Disable Scheduling On Cordoned Node
+ description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.replicaZoneSoftAntiAffinity
+ label: Replica Zone Level Soft Anti-Affinity
+ description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By default true."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.nodeDownPodDeletionPolicy
+ label: Pod Deletion Policy When Node is Down
+ description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
+- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.
+- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
+- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
+- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods."
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "do-nothing"
+ - "delete-statefulset-pod"
+ - "delete-deployment-pod"
+ - "delete-both-statefulset-and-deployment-pod"
+ default: "do-nothing"
+ - variable: defaultSettings.allowNodeDrainWithLastHealthyReplica
+ label: Allow Node Drain with the Last Healthy Replica
+ description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume.
+If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.mkfsExt4Parameters
+ label: Custom mkfs.ext4 parameters
+ description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`."
+ group: "Longhorn Default Settings"
+ type: string
+ - variable: defaultSettings.disableReplicaRebuild
+ label: Disable Replica Rebuild
+ description: "This setting disables replica rebuilding across the whole cluster; the eviction and data locality features won't work if this setting is true. But it doesn't have any impact on any in-progress replica rebuild or disaster recovery volume restore."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.replicaReplenishmentWaitInterval
+ label: Replica Replenishment Wait Interval
+ description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
+Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 600
+ - variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit
+ label: Concurrent Replica Rebuild Per Node Limit
+ description: "This setting controls how many replicas on a node can be rebuilt simultaneously.
+Typically, Longhorn can block the replica starting once the current rebuilding count on a node exceeds the limit. But when the value is 0, it means disabling the replica rebuilding.
+WARNING:
+- The old setting \"Disable Replica Rebuild\" is replaced by this setting.
+- Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped.
+- When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 5
+ - variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit
+ label: Concurrent Volume Backup Restore Per Node Limit
+ description: "This setting controls how many volumes on a node can restore the backup concurrently.
+Longhorn blocks the backup restore once the restoring volume count exceeds the limit.
+Set the value to **0** to disable backup restore."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 5
+ - variable: defaultSettings.disableRevisionCounter
+ label: Disable Revision Counter
+ description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a revision counter file to track every write to the volume. During salvage recovering Longhorn will pick the replica with largest revision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.systemManagedPodsImagePullPolicy
+ label: System Managed Pod Image Pull Policy
+ description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart."
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "if-not-present"
+ - "always"
+ - "never"
+ default: "if-not-present"
+ - variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
+ label: Allow Volume Creation with Degraded Availability
+ description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
+ label: Automatically Cleanup System Generated Snapshot
+ description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit
+ label: Concurrent Automatic Engine Upgrade Per Node Limit
+ description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 0
+ - variable: defaultSettings.backingImageCleanupWaitInterval
+ label: Backing Image Cleanup Wait Interval
+ description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 60
+ - variable: defaultSettings.backingImageRecoveryWaitInterval
+ label: Backing Image Recovery Wait Interval
+ description: "This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown.
+ WARNING:
+ - This recovery only works for the backing image of which the creation type is \"download\".
+ - File state \"unknown\" means the related manager pods on the node are not running or the node itself is down/disconnected."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 300
+ - variable: defaultSettings.guaranteedEngineManagerCPU
+ label: Guaranteed Engine Manager CPU
+ description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload.
+ In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
+ Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100.
+ The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
+ If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
+ WARNING:
+ - Value 0 means unsetting CPU requests for engine manager pods.
+ - Considering the possible new instance manager pods in the further system upgrade, this integer value ranges from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40.
+ - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
+ - This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set.
+ - After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ max: 40
+ default: 12
+ - variable: defaultSettings.guaranteedReplicaManagerCPU
+ label: Guaranteed Replica Manager CPU
+ description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload.
+ In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
+ Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100.
+ The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
+ If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
+ WARNING:
+ - Value 0 means unsetting CPU requests for replica manager pods.
+ - Considering the possible new instance manager pods in the further system upgrade, this integer value ranges from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40.
+ - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
+ - This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set.
+ - After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ max: 40
+ default: 12
+- variable: defaultSettings.kubernetesClusterAutoscalerEnabled
+ label: Kubernetes Cluster Autoscaler Enabled (Experimental)
+ description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler.
+ Longhorn prevents data loss by only allowing the Cluster Autoscaler to scale down a node that met all conditions:
+ - No volume attached to the node.
+ - Is not the last node containing the replica of any volume.
+ - Is not running backing image components pod.
+ - Is not running share manager components pod."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: false
+- variable: defaultSettings.orphanAutoDeletion
+ label: Orphaned Data Cleanup
+ description: "This setting allows Longhorn to delete the orphan resource and its corresponding orphaned data automatically like stale replicas. Orphan resources on down or unknown nodes will not be cleaned up automatically."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: false
+- variable: defaultSettings.storageNetwork
+ label: Storage Network
+ description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
+ To segregate the storage network, input the pre-existing NetworkAttachmentDefinition in \"<namespace>/<name>\" format.
+ WARNING:
+ - The cluster must have pre-existing Multus installed, and NetworkAttachmentDefinition IPs are reachable between nodes.
+ - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will try to block this setting update when there are attached volumes.
+ - When applying the setting, Longhorn will restart all manager, instance-manager, and backing-image-manager pods."
+ group: "Longhorn Default Settings"
+ type: string
+ default:
+- variable: defaultSettings.deletingConfirmationFlag
+ label: Deleting Confirmation Flag
+ description: "This flag is designed to prevent Longhorn from being accidentally uninstalled, which would lead to data loss.
+ Set this flag to **true** to allow Longhorn uninstallation.
+ If this flag is **false**, the Longhorn uninstallation job will fail."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+- variable: defaultSettings.engineReplicaTimeout
+ label: Timeout between Engine and Replica
+ description: "In seconds. The setting specifies the timeout between the engine and replica(s), and the value should be between 8 to 30 seconds. The default value is 8 seconds."
+ group: "Longhorn Default Settings"
+ type: int
+ default: "8"
+- variable: defaultSettings.snapshotDataIntegrity
+ label: Snapshot Data Integrity
+ description: "This setting allows users to enable or disable snapshot hashing and data integrity checking.
+ Available options are
+ - **disabled**: Disable snapshot disk file hashing and data integrity checking.
+ - **enabled**: Enables periodic snapshot disk file hashing and data integrity checking. To detect the filesystem-unaware corruption caused by bit rot or other issues in snapshot disk files, Longhorn system periodically hashes files and finds corrupted ones. Hence, the system performance will be impacted during the periodical checking.
+ - **fast-check**: Enable snapshot disk file hashing and fast data integrity checking. Longhorn system only hashes snapshot disk files if they are not hashed or the modification time has changed. In this mode, filesystem-unaware corruption cannot be detected, but the impact on system performance can be minimized."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "disabled"
+- variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation
+ label: Immediate Snapshot Data Integrity Check After Creating a Snapshot
+ description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+- variable: defaultSettings.snapshotDataIntegrityCronjob
+ label: Snapshot Data Integrity Check CronJob
+ description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files.
+ Warning: Hashing snapshot disk files impacts the performance of the system. It is recommended to run data integrity checks during off-peak times and to reduce the frequency of checks."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "0 0 */7 * *"
+- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim
+ label: Remove Snapshots During Filesystem Trim
+ description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children.\n\n
+ Since Longhorn filesystem trim feature can be applied to the volume head and the followed continuous removed or system snapshots only.\n\n
+ Notice that trying to trim removed files from a valid snapshot will do nothing, but the filesystem will discard this kind of in-memory trimmable file info.\n\n
+ Later on if you mark the snapshot as removed and want to retry the trim, you may need to unmount and remount the filesystem so that the filesystem can recollect the trimmable file info."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+- variable: defaultSettings.fastReplicaRebuildEnabled
+ label: Fast Replica Rebuild Enabled
+ description: "This feature supports the fast replica rebuilding. It relies on the checksum of snapshot disk files, so setting the snapshot-data-integrity to **enable** or **fast-check** is a prerequisite."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: false
+- variable: defaultSettings.replicaFileSyncHttpClientTimeout
+ label: Timeout of HTTP Client to Replica File Sync Server
+ description: "In seconds. The setting specifies the HTTP client timeout to the file sync server."
+ group: "Longhorn Default Settings"
+ type: int
+ default: "30"
+- variable: persistence.defaultClass
+ default: "true"
+ description: "Set as default StorageClass for Longhorn"
+ label: Default Storage Class
+ group: "Longhorn Storage Class Settings"
+ required: true
+ type: boolean
+- variable: persistence.reclaimPolicy
+ label: Storage Class Retain Policy
+ description: "Define reclaim policy (Retain or Delete)"
+ group: "Longhorn Storage Class Settings"
+ required: true
+ type: enum
+ options:
+ - "Delete"
+ - "Retain"
+ default: "Delete"
+- variable: persistence.defaultClassReplicaCount
+ description: "Set replica count for Longhorn StorageClass"
+ label: Default Storage Class Replica Count
+ group: "Longhorn Storage Class Settings"
+ type: int
+ min: 1
+ max: 10
+ default: 3
+- variable: persistence.defaultDataLocality
+ description: "Set data locality for Longhorn StorageClass"
+ label: Default Storage Class Data Locality
+ group: "Longhorn Storage Class Settings"
+ type: enum
+ options:
+ - "disabled"
+ - "best-effort"
+ default: "disabled"
+- variable: persistence.recurringJobSelector.enable
+ description: "Enable recurring job selector for Longhorn StorageClass"
+ group: "Longhorn Storage Class Settings"
+ label: Enable Storage Class Recurring Job Selector
+ type: boolean
+ default: false
+ show_subquestion_if: true
+ subquestions:
+ - variable: persistence.recurringJobSelector.jobList
+ description: 'Recurring job selector list for Longhorn StorageClass. Please be careful of quotes of input. e.g., [{"name":"backup", "isGroup":true}]'
+ label: Storage Class Recurring Job Selector List
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+- variable: defaultSettings.defaultNodeSelector.enable
+ description: "Enable recurring Node selector for Longhorn StorageClass"
+ group: "Longhorn Storage Class Settings"
+ label: Enable Storage Class Node Selector
+ type: boolean
+ default: false
+ show_subquestion_if: true
+ subquestions:
+ - variable: defaultSettings.defaultNodeSelector.selector
+ label: Storage Class Node Selector
+ description: 'We use NodeSelector when we want to bind PVC via StorageClass into desired mountpoint on the nodes tagged with its value'
+ group: "Longhorn Default Settings"
+ type: string
+ default:
+- variable: persistence.backingImage.enable
+ description: "Set backing image for Longhorn StorageClass"
+ group: "Longhorn Storage Class Settings"
+ label: Default Storage Class Backing Image
+ type: boolean
+ default: false
+ show_subquestion_if: true
+ subquestions:
+ - variable: persistence.backingImage.name
+ description: 'Specify a backing image that will be used by Longhorn volumes in Longhorn StorageClass. If not exists, the backing image data source type and backing image data source parameters should be specified so that Longhorn will create the backing image before using it.'
+ label: Storage Class Backing Image Name
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+ - variable: persistence.backingImage.expectedChecksum
+ description: 'Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass.
+ WARNING:
+ - If the backing image name is not specified, setting this field is meaningless.
+ - It is not recommended to set this field if the data source type is \"export-from-volume\".'
+ label: Storage Class Backing Image Expected SHA512 Checksum
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+ - variable: persistence.backingImage.dataSourceType
+ description: 'Specify the data source type for the backing image used in Longhorn StorageClass.
+ If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
+ WARNING:
+ - If the backing image name is not specified, setting this field is meaningless.
+ - As for backing image creation with data source type \"upload\", it is recommended to do it via UI rather than StorageClass here. Uploading requires file data sending to the Longhorn backend after the object creation, which is complicated if you want to handle it manually.'
+ label: Storage Class Backing Image Data Source Type
+ group: "Longhorn Storage Class Settings"
+ type: enum
+ options:
+ - ""
+ - "download"
+ - "upload"
+ - "export-from-volume"
+ default: ""
+ - variable: persistence.backingImage.dataSourceParameters
+ description: "Specify the data source parameters for the backing image used in Longhorn StorageClass.
+ If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
+ This option accepts a json string of a map. e.g., '{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'.
+ WARNING:
+ - If the backing image name is not specified, setting this field is meaningless.
+ - Be careful of the quotes here."
+ label: Storage Class Backing Image Data Source Parameters
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+- variable: persistence.removeSnapshotsDuringFilesystemTrim
+ description: "Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass"
+ label: Default Storage Class Remove Snapshots During Filesystem Trim
+ group: "Longhorn Storage Class Settings"
+ type: enum
+ options:
+ - "ignored"
+ - "enabled"
+ - "disabled"
+ default: "ignored"
+- variable: ingress.enabled
+ default: "false"
+ description: "Expose app using Layer 7 Load Balancer - ingress"
+ type: boolean
+ group: "Services and Load Balancing"
+ label: Expose app using Layer 7 Load Balancer
+ show_subquestion_if: true
+ subquestions:
+ - variable: ingress.host
+ default: "xip.io"
+ description: "layer 7 Load Balancer hostname"
+ type: hostname
+ required: true
+ label: Layer 7 Load Balancer Hostname
+ - variable: ingress.path
+ default: "/"
+ description: "If ingress is enabled you can set the default ingress path"
+ type: string
+ required: true
+ label: Ingress Path
+- variable: service.ui.type
+ default: "Rancher-Proxy"
+ description: "Define Longhorn UI service type"
+ type: enum
+ options:
+ - "ClusterIP"
+ - "NodePort"
+ - "LoadBalancer"
+ - "Rancher-Proxy"
+ label: Longhorn UI Service
+ show_if: "ingress.enabled=false"
+ group: "Services and Load Balancing"
+ show_subquestion_if: "NodePort"
+ subquestions:
+ - variable: service.ui.nodePort
+ default: ""
+ description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
+ type: int
+ min: 30000
+ max: 32767
+ show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
+ label: UI Service NodePort number
+- variable: enablePSP
+ default: "false"
+ description: "Setup a pod security policy for Longhorn workloads."
+ label: Pod Security Policy
+ type: boolean
+ group: "Other Settings"
+- variable: global.cattle.windowsCluster.enabled
+ default: "false"
+ description: "Enable this to allow Longhorn to run on the Rancher deployed Windows cluster."
+ label: Rancher Windows Cluster
+ type: boolean
+ group: "Other Settings"
diff --git a/charts/longhorn-1.4.1/templates/NOTES.txt b/charts/longhorn-1.4.1/templates/NOTES.txt
new file mode 100644
index 0000000..cca7cd7
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/NOTES.txt
@@ -0,0 +1,5 @@
+Longhorn is now installed on the cluster!
+
+Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
+
+Visit our documentation at https://longhorn.io/docs/
diff --git a/charts/longhorn-1.4.1/templates/_helpers.tpl b/charts/longhorn-1.4.1/templates/_helpers.tpl
new file mode 100644
index 0000000..3fbc2ac
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/_helpers.tpl
@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "longhorn.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "longhorn.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+
+{{- define "longhorn.managerIP" -}}
+{{- $fullname := (include "longhorn.fullname" .) -}}
+{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+
+{{- define "secret" }}
+{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
+{{- end }}
+
+{{- /*
+longhorn.labels generates the standard Helm labels.
+*/ -}}
+{{- define "longhorn.labels" -}}
+app.kubernetes.io/name: {{ template "longhorn.name" . }}
+helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/version: {{ .Chart.AppVersion }}
+{{- end -}}
+
+
+{{- define "system_default_registry" -}}
+{{- if .Values.global.cattle.systemDefaultRegistry -}}
+{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
+{{- else -}}
+{{- "" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "registry_url" -}}
+{{- if .Values.privateRegistry.registryUrl -}}
+{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
+{{- else -}}
+{{ include "system_default_registry" . }}
+{{- end -}}
+{{- end -}}
+
+{{- /*
+ define the longhorn release namespace
+*/ -}}
+{{- define "release_namespace" -}}
+{{- if .Values.namespaceOverride -}}
+{{- .Values.namespaceOverride -}}
+{{- else -}}
+{{- .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/longhorn-1.4.1/templates/clusterrole.yaml b/charts/longhorn-1.4.1/templates/clusterrole.yaml
new file mode 100644
index 0000000..bf28a47
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/clusterrole.yaml
@@ -0,0 +1,60 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: longhorn-role
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - "*"
+- apiGroups: [""]
+ resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list"]
+- apiGroups: ["apps"]
+ resources: ["daemonsets", "statefulsets", "deployments"]
+ verbs: ["*"]
+- apiGroups: ["batch"]
+ resources: ["jobs", "cronjobs"]
+ verbs: ["*"]
+- apiGroups: ["policy"]
+ resources: ["poddisruptionbudgets", "podsecuritypolicies"]
+ verbs: ["*"]
+- apiGroups: ["scheduling.k8s.io"]
+ resources: ["priorityclasses"]
+ verbs: ["watch", "list"]
+- apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"]
+ verbs: ["*"]
+- apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
+ verbs: ["*"]
+- apiGroups: ["longhorn.io"]
+ resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
+ "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
+ "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
+ "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
+ "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
+ "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
+ "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status"]
+ verbs: ["*"]
+- apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["*"]
+- apiGroups: ["metrics.k8s.io"]
+ resources: ["pods", "nodes"]
+ verbs: ["get", "list"]
+- apiGroups: ["apiregistration.k8s.io"]
+ resources: ["apiservices"]
+ verbs: ["list", "watch"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
+ verbs: ["get", "list", "create", "patch", "delete"]
+- apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"]
+ verbs: ["*"]
diff --git a/charts/longhorn-1.4.1/templates/clusterrolebinding.yaml b/charts/longhorn-1.4.1/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000..8ab944b
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/clusterrolebinding.yaml
@@ -0,0 +1,27 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: longhorn-bind
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: longhorn-role
+subjects:
+- kind: ServiceAccount
+ name: longhorn-service-account
+ namespace: {{ include "release_namespace" . }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: longhorn-support-bundle
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+- kind: ServiceAccount
+ name: longhorn-support-bundle
+ namespace: {{ include "release_namespace" . }}
diff --git a/charts/longhorn-1.4.1/templates/crds.yaml b/charts/longhorn-1.4.1/templates/crds.yaml
new file mode 100644
index 0000000..0f73824
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/crds.yaml
@@ -0,0 +1,3465 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backingimagedatasources.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: BackingImageDataSource
+ listKind: BackingImageDataSourceList
+ plural: backingimagedatasources
+ shortNames:
+ - lhbids
+ singular: backingimagedatasource
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the pod used to provision the backing image file from source
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The data source type
+ jsonPath: .spec.sourceType
+ name: SourceType
+ type: string
+ - description: The node the backing image file will be prepared on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the backing image file will be prepared on
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackingImageDataSource is where Longhorn stores backing image data source object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The system generated UUID of the provisioned backing image file
+ jsonPath: .spec.uuid
+ name: UUID
+ type: string
+ - description: The current state of the pod used to provision the backing image file from source
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The data source type
+ jsonPath: .spec.sourceType
+ name: SourceType
+ type: string
+ - description: The backing image file size
+ jsonPath: .status.size
+ name: Size
+ type: string
+ - description: The node the backing image file will be prepared on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the backing image file will be prepared on
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackingImageDataSource is where Longhorn stores backing image data source object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackingImageDataSourceSpec defines the desired state of the Longhorn backing image data source
+ properties:
+ checksum:
+ type: string
+ diskPath:
+ type: string
+ diskUUID:
+ type: string
+ fileTransferred:
+ type: boolean
+ nodeID:
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ type: object
+ sourceType:
+ enum:
+ - download
+ - upload
+ - export-from-volume
+ type: string
+ uuid:
+ type: string
+ type: object
+ status:
+ description: BackingImageDataSourceStatus defines the observed state of the Longhorn backing image data source
+ properties:
+ checksum:
+ type: string
+ currentState:
+ type: string
+ ip:
+ type: string
+ message:
+ type: string
+ ownerID:
+ type: string
+ progress:
+ type: integer
+ runningParameters:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ size:
+ format: int64
+ type: integer
+ storageIP:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backingimagemanagers.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: BackingImageManager
+ listKind: BackingImageManagerList
+ plural: backingimagemanagers
+ shortNames:
+ - lhbim
+ singular: backingimagemanager
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The image the manager pod will use
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: The node the manager is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the manager is responsible for
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - description: The disk path the manager is using
+ jsonPath: .spec.diskPath
+ name: DiskPath
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackingImageManager is where Longhorn stores backing image manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The current state of the manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The image the manager pod will use
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: The node the manager is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the manager is responsible for
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - description: The disk path the manager is using
+ jsonPath: .spec.diskPath
+ name: DiskPath
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackingImageManager is where Longhorn stores backing image manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackingImageManagerSpec defines the desired state of the Longhorn backing image manager
+ properties:
+ backingImages:
+ additionalProperties:
+ type: string
+ type: object
+ diskPath:
+ type: string
+ diskUUID:
+ type: string
+ image:
+ type: string
+ nodeID:
+ type: string
+ type: object
+ status:
+ description: BackingImageManagerStatus defines the observed state of the Longhorn backing image manager
+ properties:
+ apiMinVersion:
+ type: integer
+ apiVersion:
+ type: integer
+ backingImageFileMap:
+ additionalProperties:
+ properties:
+ currentChecksum:
+ type: string
+ directory:
+ description: 'Deprecated: This field is useless.'
+ type: string
+ downloadProgress:
+ description: 'Deprecated: This field is renamed to `Progress`.'
+ type: integer
+ message:
+ type: string
+ name:
+ type: string
+ progress:
+ type: integer
+ senderManagerAddress:
+ type: string
+ sendingReference:
+ type: integer
+ size:
+ format: int64
+ type: integer
+ state:
+ type: string
+ url:
+ description: 'Deprecated: This field is useless now. The manager of backing image files doesn''t care if a file is downloaded and how.'
+ type: string
+ uuid:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ currentState:
+ type: string
+ ip:
+ type: string
+ ownerID:
+ type: string
+ storageIP:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backingimages.longhorn.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9443
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: BackingImage
+ listKind: BackingImageList
+ plural: backingimages
+ shortNames:
+ - lhbi
+ singular: backingimage
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The backing image name
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackingImage is where Longhorn stores backing image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The system generated UUID
+ jsonPath: .status.uuid
+ name: UUID
+ type: string
+ - description: The source of the backing image file data
+ jsonPath: .spec.sourceType
+ name: SourceType
+ type: string
+ - description: The backing image file size in each disk
+ jsonPath: .status.size
+ name: Size
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackingImage is where Longhorn stores backing image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackingImageSpec defines the desired state of the Longhorn backing image
+ properties:
+ checksum:
+ type: string
+ disks:
+ additionalProperties:
+ type: string
+ type: object
+ imageURL:
+ description: 'Deprecated: This kind of info will be included in the related BackingImageDataSource.'
+ type: string
+ sourceParameters:
+ additionalProperties:
+ type: string
+ type: object
+ sourceType:
+ enum:
+ - download
+ - upload
+ - export-from-volume
+ type: string
+ type: object
+ status:
+ description: BackingImageStatus defines the observed state of the Longhorn backing image status
+ properties:
+ checksum:
+ type: string
+ diskDownloadProgressMap:
+ additionalProperties:
+ type: integer
+ description: 'Deprecated: Replaced by field `Progress` in `DiskFileStatusMap`.'
+ nullable: true
+ type: object
+ diskDownloadStateMap:
+ additionalProperties:
+ description: BackingImageDownloadState is replaced by BackingImageState.
+ type: string
+ description: 'Deprecated: Replaced by field `State` in `DiskFileStatusMap`.'
+ nullable: true
+ type: object
+ diskFileStatusMap:
+ additionalProperties:
+ properties:
+ lastStateTransitionTime:
+ type: string
+ message:
+ type: string
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ diskLastRefAtMap:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ ownerID:
+ type: string
+ size:
+ format: int64
+ type: integer
+ uuid:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backups.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Backup
+ listKind: BackupList
+ plural: backups
+ shortNames:
+ - lhb
+ singular: backup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The snapshot name
+ jsonPath: .status.snapshotName
+ name: SnapshotName
+ type: string
+ - description: The snapshot size
+ jsonPath: .status.size
+ name: SnapshotSize
+ type: string
+ - description: The snapshot creation time
+ jsonPath: .status.snapshotCreatedAt
+ name: SnapshotCreatedAt
+ type: string
+ - description: The backup state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The backup last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Backup is where Longhorn stores backup object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The snapshot name
+ jsonPath: .status.snapshotName
+ name: SnapshotName
+ type: string
+ - description: The snapshot size
+ jsonPath: .status.size
+ name: SnapshotSize
+ type: string
+ - description: The snapshot creation time
+ jsonPath: .status.snapshotCreatedAt
+ name: SnapshotCreatedAt
+ type: string
+ - description: The backup state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The backup last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Backup is where Longhorn stores backup object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackupSpec defines the desired state of the Longhorn backup
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels of snapshot backup.
+ type: object
+ snapshotName:
+ description: The snapshot name.
+ type: string
+ syncRequestedAt:
+ description: The time to request run sync the remote backup.
+ format: date-time
+ nullable: true
+ type: string
+ type: object
+ status:
+ description: BackupStatus defines the observed state of the Longhorn backup
+ properties:
+ backupCreatedAt:
+ description: The snapshot backup upload finished time.
+ type: string
+ error:
+ description: The error message when taking the snapshot backup.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels of snapshot backup.
+ nullable: true
+ type: object
+ lastSyncedAt:
+ description: The last time that the backup was synced with the remote backup target.
+ format: date-time
+ nullable: true
+ type: string
+ messages:
+ additionalProperties:
+ type: string
+ description: The error messages when calling longhorn engine on listing or inspecting backups.
+ nullable: true
+ type: object
+ ownerID:
+ description: The node ID on which the controller is responsible to reconcile this backup CR.
+ type: string
+ progress:
+ description: The snapshot backup progress.
+ type: integer
+ replicaAddress:
+ description: The address of the replica that runs snapshot backup.
+ type: string
+ size:
+ description: The snapshot size.
+ type: string
+ snapshotCreatedAt:
+ description: The snapshot creation time.
+ type: string
+ snapshotName:
+ description: The snapshot name.
+ type: string
+ state:
+ description: The backup creation state. Can be "", "InProgress", "Completed", "Error", "Unknown".
+ type: string
+ url:
+ description: The snapshot backup URL.
+ type: string
+ volumeBackingImageName:
+ description: The volume's backing image name.
+ type: string
+ volumeCreated:
+ description: The volume creation time.
+ type: string
+ volumeName:
+ description: The volume name.
+ type: string
+ volumeSize:
+ description: The volume size.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backuptargets.longhorn.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9443
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: BackupTarget
+ listKind: BackupTargetList
+ plural: backuptargets
+ shortNames:
+ - lhbt
+ singular: backuptarget
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The backup target URL
+ jsonPath: .spec.backupTargetURL
+ name: URL
+ type: string
+ - description: The backup target credential secret
+ jsonPath: .spec.credentialSecret
+ name: Credential
+ type: string
+ - description: The backup target poll interval
+ jsonPath: .spec.pollInterval
+ name: PollInterval
+ type: string
+ - description: Indicate whether the backup target is available or not
+ jsonPath: .status.available
+ name: Available
+ type: boolean
+ - description: The backup target last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackupTarget is where Longhorn stores backup target object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The backup target URL
+ jsonPath: .spec.backupTargetURL
+ name: URL
+ type: string
+ - description: The backup target credential secret
+ jsonPath: .spec.credentialSecret
+ name: Credential
+ type: string
+ - description: The backup target poll interval
+ jsonPath: .spec.pollInterval
+ name: PollInterval
+ type: string
+ - description: Indicate whether the backup target is available or not
+ jsonPath: .status.available
+ name: Available
+ type: boolean
+ - description: The backup target last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackupTarget is where Longhorn stores backup target object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackupTargetSpec defines the desired state of the Longhorn backup target
+ properties:
+ backupTargetURL:
+ description: The backup target URL.
+ type: string
+ credentialSecret:
+ description: The backup target credential secret.
+ type: string
+ pollInterval:
+ description: The interval that the cluster needs to run sync with the backup target.
+ type: string
+ syncRequestedAt:
+ description: The time to request run sync the remote backup target.
+ format: date-time
+ nullable: true
+ type: string
+ type: object
+ status:
+ description: BackupTargetStatus defines the observed state of the Longhorn backup target
+ properties:
+ available:
+ description: Available indicates if the remote backup target is available or not.
+ type: boolean
+ conditions:
+ description: Records the reason on why the backup target is unavailable.
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ lastSyncedAt:
+ description: The last time that the controller synced with the remote backup target.
+ format: date-time
+ nullable: true
+ type: string
+ ownerID:
+ description: The node ID on which the controller is responsible to reconcile this backup target CR.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backupvolumes.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: BackupVolume
+ listKind: BackupVolumeList
+ plural: backupvolumes
+ shortNames:
+ - lhbv
+ singular: backupvolume
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The backup volume creation time
+ jsonPath: .status.createdAt
+ name: CreatedAt
+ type: string
+ - description: The backup volume last backup name
+ jsonPath: .status.lastBackupName
+ name: LastBackupName
+ type: string
+ - description: The backup volume last backup time
+ jsonPath: .status.lastBackupAt
+ name: LastBackupAt
+ type: string
+ - description: The backup volume last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackupVolume is where Longhorn stores backup volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The backup volume creation time
+ jsonPath: .status.createdAt
+ name: CreatedAt
+ type: string
+ - description: The backup volume last backup name
+ jsonPath: .status.lastBackupName
+ name: LastBackupName
+ type: string
+ - description: The backup volume last backup time
+ jsonPath: .status.lastBackupAt
+ name: LastBackupAt
+ type: string
+ - description: The backup volume last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackupVolume is where Longhorn stores backup volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackupVolumeSpec defines the desired state of the Longhorn backup volume
+ properties:
+ syncRequestedAt:
+ description: The time to request run sync the remote backup volume.
+ format: date-time
+ nullable: true
+ type: string
+ type: object
+ status:
+ description: BackupVolumeStatus defines the observed state of the Longhorn backup volume
+ properties:
+ backingImageChecksum:
+ description: the backing image checksum.
+ type: string
+ backingImageName:
+ description: The backing image name.
+ type: string
+ createdAt:
+ description: The backup volume creation time.
+ type: string
+ dataStored:
+ description: The backup volume block count.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: The backup volume labels.
+ nullable: true
+ type: object
+ lastBackupAt:
+ description: The latest volume backup time.
+ type: string
+ lastBackupName:
+ description: The latest volume backup name.
+ type: string
+ lastModificationTime:
+ description: The backup volume config last modification time.
+ format: date-time
+ nullable: true
+ type: string
+ lastSyncedAt:
+ description: The last time that the backup volume was synced into the cluster.
+ format: date-time
+ nullable: true
+ type: string
+ messages:
+ additionalProperties:
+ type: string
+ description: The error messages when call longhorn engine on list or inspect backup volumes.
+ nullable: true
+ type: object
+ ownerID:
+ description: The node ID on which the controller is responsible to reconcile this backup volume CR.
+ type: string
+ size:
+ description: The backup volume size.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: engineimages.longhorn.io
+spec:
+ preserveUnknownFields: false
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9443
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: EngineImage
+ listKind: EngineImageList
+ plural: engineimages
+ shortNames:
+ - lhei
+ singular: engineimage
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: State of the engine image
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The Longhorn engine image
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: Number of resources using the engine image
+ jsonPath: .status.refCount
+ name: RefCount
+ type: integer
+ - description: The build date of the engine image
+ jsonPath: .status.buildDate
+ name: BuildDate
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: EngineImage is where Longhorn stores engine image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: State of the engine image
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The Longhorn engine image
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: Number of resources using the engine image
+ jsonPath: .status.refCount
+ name: RefCount
+ type: integer
+ - description: The build date of the engine image
+ jsonPath: .status.buildDate
+ name: BuildDate
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: EngineImage is where Longhorn stores engine image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EngineImageSpec defines the desired state of the Longhorn engine image
+ properties:
+ image:
+ minLength: 1
+ type: string
+ required:
+ - image
+ type: object
+ status:
+ description: EngineImageStatus defines the observed state of the Longhorn engine image
+ properties:
+ buildDate:
+ type: string
+ cliAPIMinVersion:
+ type: integer
+ cliAPIVersion:
+ type: integer
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ controllerAPIMinVersion:
+ type: integer
+ controllerAPIVersion:
+ type: integer
+ dataFormatMinVersion:
+ type: integer
+ dataFormatVersion:
+ type: integer
+ gitCommit:
+ type: string
+ noRefSince:
+ type: string
+ nodeDeploymentMap:
+ additionalProperties:
+ type: boolean
+ nullable: true
+ type: object
+ ownerID:
+ type: string
+ refCount:
+ type: integer
+ state:
+ type: string
+ version:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: engines.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Engine
+ listKind: EngineList
+ plural: engines
+ shortNames:
+ - lhe
+ singular: engine
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the engine
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the engine is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The instance manager of the engine
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the engine
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Engine is where Longhorn stores engine object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The current state of the engine
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the engine is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The instance manager of the engine
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the engine
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Engine is where Longhorn stores engine object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EngineSpec defines the desired state of the Longhorn engine
+ properties:
+ active:
+ type: boolean
+ backupVolume:
+ type: string
+ desireState:
+ type: string
+ disableFrontend:
+ type: boolean
+ engineImage:
+ type: string
+ frontend:
+ enum:
+ - blockdev
+ - iscsi
+ - ""
+ type: string
+ logRequested:
+ type: boolean
+ nodeID:
+ type: string
+ replicaAddressMap:
+ additionalProperties:
+ type: string
+ type: object
+ requestedBackupRestore:
+ type: string
+ requestedDataSource:
+ type: string
+ revisionCounterDisabled:
+ type: boolean
+ salvageRequested:
+ type: boolean
+ unmapMarkSnapChainRemovedEnabled:
+ type: boolean
+ upgradedReplicaAddressMap:
+ additionalProperties:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeSize:
+ format: int64
+ type: string
+ type: object
+ status:
+ description: EngineStatus defines the observed state of the Longhorn engine
+ properties:
+ backupStatus:
+ additionalProperties:
+ properties:
+ backupURL:
+ type: string
+ error:
+ type: string
+ progress:
+ type: integer
+ replicaAddress:
+ type: string
+ snapshotName:
+ type: string
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ cloneStatus:
+ additionalProperties:
+ properties:
+ error:
+ type: string
+ fromReplicaAddress:
+ type: string
+ isCloning:
+ type: boolean
+ progress:
+ type: integer
+ snapshotName:
+ type: string
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ currentImage:
+ type: string
+ currentReplicaAddressMap:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ currentSize:
+ format: int64
+ type: string
+ currentState:
+ type: string
+ endpoint:
+ type: string
+ instanceManagerName:
+ type: string
+ ip:
+ type: string
+ isExpanding:
+ type: boolean
+ lastExpansionError:
+ type: string
+ lastExpansionFailedAt:
+ type: string
+ lastRestoredBackup:
+ type: string
+ logFetched:
+ type: boolean
+ ownerID:
+ type: string
+ port:
+ type: integer
+ purgeStatus:
+ additionalProperties:
+ properties:
+ error:
+ type: string
+ isPurging:
+ type: boolean
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ rebuildStatus:
+ additionalProperties:
+ properties:
+ error:
+ type: string
+ fromReplicaAddress:
+ type: string
+ isRebuilding:
+ type: boolean
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ replicaModeMap:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ restoreStatus:
+ additionalProperties:
+ properties:
+ backupURL:
+ type: string
+ currentRestoringBackup:
+ type: string
+ error:
+ type: string
+ filename:
+ type: string
+ isRestoring:
+ type: boolean
+ lastRestored:
+ type: string
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ salvageExecuted:
+ type: boolean
+ snapshots:
+ additionalProperties:
+ properties:
+ children:
+ additionalProperties:
+ type: boolean
+ nullable: true
+ type: object
+ created:
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ name:
+ type: string
+ parent:
+ type: string
+ removed:
+ type: boolean
+ size:
+ type: string
+ usercreated:
+ type: boolean
+ type: object
+ nullable: true
+ type: object
+ snapshotsError:
+ type: string
+ started:
+ type: boolean
+ storageIP:
+ type: string
+ unmapMarkSnapChainRemovedEnabled:
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: instancemanagers.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: InstanceManager
+ listKind: InstanceManagerList
+ plural: instancemanagers
+ shortNames:
+ - lhim
+ singular: instancemanager
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the instance manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The type of the instance manager (engine or replica)
+ jsonPath: .spec.type
+ name: Type
+ type: string
+ - description: The node that the instance manager is running on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: InstanceManager is where Longhorn stores instance manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The state of the instance manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The type of the instance manager (engine or replica)
+ jsonPath: .spec.type
+ name: Type
+ type: string
+ - description: The node that the instance manager is running on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: InstanceManager is where Longhorn stores instance manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: InstanceManagerSpec defines the desired state of the Longhorn instance manager
+ properties:
+ engineImage:
+ description: 'Deprecated: This field is useless.'
+ type: string
+ image:
+ type: string
+ nodeID:
+ type: string
+ type:
+ enum:
+ - engine
+ - replica
+ type: string
+ type: object
+ status:
+ description: InstanceManagerStatus defines the observed state of the Longhorn instance manager
+ properties:
+ apiMinVersion:
+ type: integer
+ apiVersion:
+ type: integer
+ proxyApiMinVersion:
+ type: integer
+ proxyApiVersion:
+ type: integer
+ currentState:
+ type: string
+ instances:
+ additionalProperties:
+ properties:
+ spec:
+ properties:
+ name:
+ type: string
+ type: object
+ status:
+ properties:
+ endpoint:
+ type: string
+ errorMsg:
+ type: string
+ listen:
+ type: string
+ portEnd:
+ format: int32
+ type: integer
+ portStart:
+ format: int32
+ type: integer
+ resourceVersion:
+ format: int64
+ type: integer
+ state:
+ type: string
+ type:
+ type: string
+ type: object
+ type: object
+ nullable: true
+ type: object
+ ip:
+ type: string
+ ownerID:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: nodes.longhorn.io
+spec:
+ preserveUnknownFields: false
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9443
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: Node
+ listKind: NodeList
+ plural: nodes
+ shortNames:
+ - lhn
+ singular: node
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicate whether the node is ready
+ jsonPath: .status.conditions['Ready']['status']
+ name: Ready
+ type: string
+ - description: Indicate whether the user disabled/enabled replica scheduling for the node
+ jsonPath: .spec.allowScheduling
+ name: AllowScheduling
+ type: boolean
+ - description: Indicate whether Longhorn can schedule replicas on the node
+ jsonPath: .status.conditions['Schedulable']['status']
+ name: Schedulable
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Node is where Longhorn stores Longhorn node object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Indicate whether the node is ready
+ jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - description: Indicate whether the user disabled/enabled replica scheduling for the node
+ jsonPath: .spec.allowScheduling
+ name: AllowScheduling
+ type: boolean
+ - description: Indicate whether Longhorn can schedule replicas on the node
+ jsonPath: .status.conditions[?(@.type=='Schedulable')].status
+ name: Schedulable
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Node is where Longhorn stores Longhorn node object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NodeSpec defines the desired state of the Longhorn node
+ properties:
+ allowScheduling:
+ type: boolean
+ disks:
+ additionalProperties:
+ properties:
+ allowScheduling:
+ type: boolean
+ evictionRequested:
+ type: boolean
+ path:
+ type: string
+ storageReserved:
+ format: int64
+ type: integer
+ tags:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ engineManagerCPURequest:
+ type: integer
+ evictionRequested:
+ type: boolean
+ name:
+ type: string
+ replicaManagerCPURequest:
+ type: integer
+ tags:
+ items:
+ type: string
+ type: array
+ type: object
+ status:
+ description: NodeStatus defines the observed state of the Longhorn node
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ diskStatus:
+ additionalProperties:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ diskUUID:
+ type: string
+ scheduledReplica:
+ additionalProperties:
+ format: int64
+ type: integer
+ nullable: true
+ type: object
+ storageAvailable:
+ format: int64
+ type: integer
+ storageMaximum:
+ format: int64
+ type: integer
+ storageScheduled:
+ format: int64
+ type: integer
+ type: object
+ nullable: true
+ type: object
+ region:
+ type: string
+ snapshotCheckStatus:
+ properties:
+ lastPeriodicCheckedAt:
+ format: date-time
+ type: string
+ snapshotCheckState:
+ type: string
+ type: object
+ zone:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: orphans.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Orphan
+ listKind: OrphanList
+ plural: orphans
+ shortNames:
+ - lho
+ singular: orphan
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The type of the orphan
+ jsonPath: .spec.orphanType
+ name: Type
+ type: string
+ - description: The node that the orphan is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Orphan is where Longhorn stores orphan object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OrphanSpec defines the desired state of the Longhorn orphaned data
+ properties:
+ nodeID:
+ description: The node ID on which the controller is responsible to reconcile this orphan CR.
+ type: string
+ orphanType:
+ description: The type of the orphaned data. Can be "replica".
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: The parameters of the orphaned data
+ type: object
+ type: object
+ status:
+ description: OrphanStatus defines the observed state of the Longhorn orphaned data
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ ownerID:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: recurringjobs.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: RecurringJob
+ listKind: RecurringJobList
+ plural: recurringjobs
+ shortNames:
+ - lhrj
+ singular: recurringjob
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Sets groupings to the jobs. When set to "default" group will be added to the volume label when no other job label exist in volume
+ jsonPath: .spec.groups
+ name: Groups
+ type: string
+ - description: Should be one of "backup" or "snapshot"
+ jsonPath: .spec.task
+ name: Task
+ type: string
+ - description: The cron expression represents recurring job scheduling
+ jsonPath: .spec.cron
+ name: Cron
+ type: string
+ - description: The number of snapshots/backups to keep for the volume
+ jsonPath: .spec.retain
+ name: Retain
+ type: integer
+ - description: The concurrent job to run by each cron job
+ jsonPath: .spec.concurrency
+ name: Concurrency
+ type: integer
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Specify the labels
+ jsonPath: .spec.labels
+ name: Labels
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: RecurringJob is where Longhorn stores recurring job object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Sets groupings to the jobs. When set to "default" group will be added to the volume label when no other job label exist in volume
+ jsonPath: .spec.groups
+ name: Groups
+ type: string
+ - description: Should be one of "snapshot", "snapshot-cleanup", "snapshot-delete" or "backup"
+ jsonPath: .spec.task
+ name: Task
+ type: string
+ - description: The cron expression represents recurring job scheduling
+ jsonPath: .spec.cron
+ name: Cron
+ type: string
+ - description: The number of snapshots/backups to keep for the volume
+ jsonPath: .spec.retain
+ name: Retain
+ type: integer
+ - description: The concurrent job to run by each cron job
+ jsonPath: .spec.concurrency
+ name: Concurrency
+ type: integer
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Specify the labels
+ jsonPath: .spec.labels
+ name: Labels
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: RecurringJob is where Longhorn stores recurring job object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: RecurringJobSpec defines the desired state of the Longhorn recurring job
+ properties:
+ concurrency:
+ description: The concurrency of taking the snapshot/backup.
+ type: integer
+ cron:
+ description: The cron setting.
+ type: string
+ groups:
+ description: The recurring job group.
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ description: The label of the snapshot/backup.
+ type: object
+ name:
+ description: The recurring job name.
+ type: string
+ retain:
+ description: The retain count of the snapshot/backup.
+ type: integer
+ task:
+ description: The recurring job task. Can be "snapshot", "snapshot-cleanup", "snapshot-delete" or "backup".
+ enum:
+ - snapshot
+ - snapshot-cleanup
+ - snapshot-delete
+ - backup
+ type: string
+ type: object
+ status:
+ description: RecurringJobStatus defines the observed state of the Longhorn recurring job
+ properties:
+ ownerID:
+ description: The owner ID which is responsible to reconcile this recurring job CR.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: replicas.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Replica
+ listKind: ReplicaList
+ plural: replicas
+ shortNames:
+ - lhr
+ singular: replica
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the replica
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the replica is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk that the replica is on
+ jsonPath: .spec.diskID
+ name: Disk
+ type: string
+ - description: The instance manager of the replica
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the replica
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Replica is where Longhorn stores replica object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The current state of the replica
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the replica is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk that the replica is on
+ jsonPath: .spec.diskID
+ name: Disk
+ type: string
+ - description: The instance manager of the replica
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the replica
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Replica is where Longhorn stores replica object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ReplicaSpec defines the desired state of the Longhorn replica
+ properties:
+ active:
+ type: boolean
+ backingImage:
+ type: string
+ baseImage:
+ description: Deprecated. Rename to BackingImage
+ type: string
+ dataDirectoryName:
+ type: string
+ dataPath:
+ description: Deprecated
+ type: string
+ desireState:
+ type: string
+ diskID:
+ type: string
+ diskPath:
+ type: string
+ engineImage:
+ type: string
+ engineName:
+ type: string
+ failedAt:
+ type: string
+ hardNodeAffinity:
+ type: string
+ healthyAt:
+ type: string
+ logRequested:
+ type: boolean
+ nodeID:
+ type: string
+ rebuildRetryCount:
+ type: integer
+ revisionCounterDisabled:
+ type: boolean
+ salvageRequested:
+ type: boolean
+ unmapMarkDiskChainRemovedEnabled:
+ type: boolean
+ volumeName:
+ type: string
+ volumeSize:
+ format: int64
+ type: string
+ type: object
+ status:
+ description: ReplicaStatus defines the observed state of the Longhorn replica
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ currentImage:
+ type: string
+ currentState:
+ type: string
+ evictionRequested:
+ type: boolean
+ instanceManagerName:
+ type: string
+ ip:
+ type: string
+ logFetched:
+ type: boolean
+ ownerID:
+ type: string
+ port:
+ type: integer
+ salvageExecuted:
+ type: boolean
+ started:
+ type: boolean
+ storageIP:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: settings.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Setting
+ listKind: SettingList
+ plural: settings
+ shortNames:
+ - lhs
+ singular: setting
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The value of the setting
+ jsonPath: .value
+ name: Value
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Setting is where Longhorn stores setting object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ value:
+ type: string
+ required:
+ - value
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The value of the setting
+ jsonPath: .value
+ name: Value
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Setting is where Longhorn stores setting object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ value:
+ type: string
+ required:
+ - value
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: sharemanagers.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: ShareManager
+ listKind: ShareManagerList
+ plural: sharemanagers
+ shortNames:
+ - lhsm
+ singular: sharemanager
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the share manager
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The node that the share manager is owned by
+ jsonPath: .status.ownerID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: ShareManager is where Longhorn stores share manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The state of the share manager
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The node that the share manager is owned by
+ jsonPath: .status.ownerID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ShareManager is where Longhorn stores share manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ShareManagerSpec defines the desired state of the Longhorn share manager
+ properties:
+ image:
+ type: string
+ type: object
+ status:
+ description: ShareManagerStatus defines the observed state of the Longhorn share manager
+ properties:
+ endpoint:
+ type: string
+ ownerID:
+ type: string
+ state:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: snapshots.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Snapshot
+ listKind: SnapshotList
+ plural: snapshots
+ shortNames:
+ - lhsnap
+ singular: snapshot
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The volume that this snapshot belongs to
+ jsonPath: .spec.volume
+ name: Volume
+ type: string
+ - description: Timestamp when the point-in-time snapshot was taken
+ jsonPath: .status.creationTime
+ name: CreationTime
+ type: string
+ - description: Indicates if the snapshot is ready to be used to restore/backup a volume
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Represents the minimum size of volume required to rehydrate from this snapshot
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: string
+ - description: The actual size of the snapshot
+ jsonPath: .status.size
+ name: Size
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Snapshot is the Schema for the snapshots API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SnapshotSpec defines the desired state of Longhorn Snapshot
+ properties:
+ createSnapshot:
+ description: require creating a new snapshot
+ type: boolean
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels of snapshot
+ nullable: true
+ type: object
+ volume:
+ description: the volume that this snapshot belongs to. This field is immutable after creation. Required
+ type: string
+ required:
+ - volume
+ type: object
+ status:
+ description: SnapshotStatus defines the observed state of Longhorn Snapshot
+ properties:
+ checksum:
+ type: string
+ children:
+ additionalProperties:
+ type: boolean
+ nullable: true
+ type: object
+ creationTime:
+ type: string
+ error:
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ markRemoved:
+ type: boolean
+ ownerID:
+ type: string
+ parent:
+ type: string
+ readyToUse:
+ type: boolean
+ restoreSize:
+ format: int64
+ type: integer
+ size:
+ format: int64
+ type: integer
+ userCreated:
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: supportbundles.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: SupportBundle
+ listKind: SupportBundleList
+ plural: supportbundles
+ shortNames:
+ - lhbundle
+ singular: supportbundle
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the support bundle
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The issue URL
+ jsonPath: .spec.issueURL
+ name: Issue
+ type: string
+ - description: A brief description of the issue
+ jsonPath: .spec.description
+ name: Description
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: SupportBundle is where Longhorn stores support bundle object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SupportBundleSpec defines the desired state of the Longhorn SupportBundle
+ properties:
+ description:
+ description: A brief description of the issue
+ type: string
+ issueURL:
+ description: The issue URL
+ nullable: true
+ type: string
+ nodeID:
+ description: The preferred responsible controller node ID.
+ type: string
+ required:
+ - description
+ type: object
+ status:
+ description: SupportBundleStatus defines the observed state of the Longhorn SupportBundle
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ type: array
+ filename:
+ type: string
+ filesize:
+ format: int64
+ type: integer
+ image:
+ description: The support bundle manager image
+ type: string
+ managerIP:
+ description: The support bundle manager IP
+ type: string
+ ownerID:
+ description: The current responsible controller node ID
+ type: string
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: systembackups.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: SystemBackup
+ listKind: SystemBackupList
+ plural: systembackups
+ shortNames:
+ - lhsb
+ singular: systembackup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The system backup Longhorn version
+ jsonPath: .status.version
+ name: Version
+ type: string
+ - description: The system backup state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The system backup creation time
+ jsonPath: .status.createdAt
+ name: Created
+ type: string
+ - description: The last time that the system backup was synced into the cluster
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: SystemBackup is where Longhorn stores system backup object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SystemBackupSpec defines the desired state of the Longhorn SystemBackup
+ type: object
+ status:
+ description: SystemBackupStatus defines the observed state of the Longhorn SystemBackup
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ createdAt:
+ description: The system backup creation time.
+ format: date-time
+ type: string
+ gitCommit:
+ description: The saved Longhorn manager git commit.
+ nullable: true
+ type: string
+ lastSyncedAt:
+ description: The last time that the system backup was synced into the cluster.
+ format: date-time
+ nullable: true
+ type: string
+ managerImage:
+ description: The saved manager image.
+ type: string
+ ownerID:
+ description: The node ID of the responsible controller to reconcile this SystemBackup.
+ type: string
+ state:
+ description: The system backup state.
+ type: string
+ version:
+ description: The saved Longhorn version.
+ nullable: true
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: systemrestores.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: SystemRestore
+ listKind: SystemRestoreList
+ plural: systemrestores
+ shortNames:
+ - lhsr
+ singular: systemrestore
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The system restore state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: SystemRestore is where Longhorn stores system restore object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SystemRestoreSpec defines the desired state of the Longhorn SystemRestore
+ properties:
+ systemBackup:
+ description: The system backup name in the object store.
+ type: string
+ required:
+ - systemBackup
+ type: object
+ status:
+ description: SystemRestoreStatus defines the observed state of the Longhorn SystemRestore
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ ownerID:
+ description: The node ID of the responsible controller to reconcile this SystemRestore.
+ type: string
+ sourceURL:
+ description: The source system backup URL.
+ type: string
+ state:
+ description: The system restore state.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: volumes.longhorn.io
+spec:
+ preserveUnknownFields: false
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9443
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: Volume
+ listKind: VolumeList
+ plural: volumes
+ shortNames:
+ - lhv
+ singular: volume
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the volume
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The robustness of the volume
+ jsonPath: .status.robustness
+ name: Robustness
+ type: string
+ - description: The scheduled condition of the volume
+ jsonPath: .status.conditions['scheduled']['status']
+ name: Scheduled
+ type: string
+ - description: The size of the volume
+ jsonPath: .spec.size
+ name: Size
+ type: string
+ - description: The node that the volume is currently attaching to
+ jsonPath: .status.currentNodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Volume is where Longhorn stores volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The state of the volume
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The robustness of the volume
+ jsonPath: .status.robustness
+ name: Robustness
+ type: string
+ - description: The scheduled condition of the volume
+ jsonPath: .status.conditions[?(@.type=='Schedulable')].status
+ name: Scheduled
+ type: string
+ - description: The size of the volume
+ jsonPath: .spec.size
+ name: Size
+ type: string
+ - description: The node that the volume is currently attaching to
+ jsonPath: .status.currentNodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Volume is where Longhorn stores volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: VolumeSpec defines the desired state of the Longhorn volume
+ properties:
+ Standby:
+ type: boolean
+ accessMode:
+ enum:
+ - rwo
+ - rwx
+ type: string
+ backingImage:
+ type: string
+ baseImage:
+ description: Deprecated. Rename to BackingImage
+ type: string
+ dataLocality:
+ enum:
+ - disabled
+ - best-effort
+ - strict-local
+ type: string
+ dataSource:
+ type: string
+ disableFrontend:
+ type: boolean
+ diskSelector:
+ items:
+ type: string
+ type: array
+ encrypted:
+ type: boolean
+ engineImage:
+ type: string
+ fromBackup:
+ type: string
+ restoreVolumeRecurringJob:
+ enum:
+ - ignored
+ - enabled
+ - disabled
+ type: string
+ frontend:
+ enum:
+ - blockdev
+ - iscsi
+ - ""
+ type: string
+ lastAttachedBy:
+ type: string
+ migratable:
+ type: boolean
+ migrationNodeID:
+ type: string
+ nodeID:
+ type: string
+ nodeSelector:
+ items:
+ type: string
+ type: array
+ numberOfReplicas:
+ type: integer
+ recurringJobs:
+ description: Deprecated. Replaced by a separate resource named "RecurringJob"
+ items:
+ description: 'Deprecated: This field is useless and has been replaced by the RecurringJob CRD'
+ properties:
+ concurrency:
+ type: integer
+ cron:
+ type: string
+ groups:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ retain:
+ type: integer
+ task:
+ enum:
+ - snapshot
+ - snapshot-cleanup
+ - snapshot-delete
+ - backup
+ type: string
+ type: object
+ type: array
+ replicaAutoBalance:
+ enum:
+ - ignored
+ - disabled
+ - least-effort
+ - best-effort
+ type: string
+ revisionCounterDisabled:
+ type: boolean
+ size:
+ format: int64
+ type: string
+ snapshotDataIntegrity:
+ enum:
+ - ignored
+ - disabled
+ - enabled
+ - fast-check
+ type: string
+ staleReplicaTimeout:
+ type: integer
+ unmapMarkSnapChainRemoved:
+ enum:
+ - ignored
+ - disabled
+ - enabled
+ type: string
+ type: object
+ status:
+ description: VolumeStatus defines the observed state of the Longhorn volume
+ properties:
+ actualSize:
+ format: int64
+ type: integer
+ cloneStatus:
+ properties:
+ snapshot:
+ type: string
+ sourceVolume:
+ type: string
+ state:
+ type: string
+ type: object
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ currentImage:
+ type: string
+ currentNodeID:
+ type: string
+ expansionRequired:
+ type: boolean
+ frontendDisabled:
+ type: boolean
+ isStandby:
+ type: boolean
+ kubernetesStatus:
+ properties:
+ lastPVCRefAt:
+ type: string
+ lastPodRefAt:
+ type: string
+ namespace:
+ description: determine if PVC/Namespace is history or not
+ type: string
+ pvName:
+ type: string
+ pvStatus:
+ type: string
+ pvcName:
+ type: string
+ workloadsStatus:
+ description: determine if Pod/Workload is history or not
+ items:
+ properties:
+ podName:
+ type: string
+ podStatus:
+ type: string
+ workloadName:
+ type: string
+ workloadType:
+ type: string
+ type: object
+ nullable: true
+ type: array
+ type: object
+ lastBackup:
+ type: string
+ lastBackupAt:
+ type: string
+ lastDegradedAt:
+ type: string
+ ownerID:
+ type: string
+ pendingNodeID:
+ type: string
+ remountRequestedAt:
+ type: string
+ restoreInitiated:
+ type: boolean
+ restoreRequired:
+ type: boolean
+ robustness:
+ type: string
+ shareEndpoint:
+ type: string
+ shareState:
+ type: string
+ state:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/longhorn-1.4.1/templates/daemonset-sa.yaml b/charts/longhorn-1.4.1/templates/daemonset-sa.yaml
new file mode 100644
index 0000000..63f98cd
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/daemonset-sa.yaml
@@ -0,0 +1,147 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-manager
+ name: longhorn-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ selector:
+ matchLabels:
+ app: longhorn-manager
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-manager
+ {{- with .Values.annotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ initContainers:
+ - name: wait-longhorn-admission-webhook
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" -k https://longhorn-admission-webhook:9443/v1/healthz) != "200" ]; do echo waiting; sleep 2; done']
+ containers:
+ - name: longhorn-manager
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: true
+ command:
+ - longhorn-manager
+ - -d
+ {{- if eq .Values.longhornManager.log.format "json" }}
+ - -j
+ {{- end }}
+ - daemon
+ - --engine-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
+ - --instance-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
+ - --share-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
+ - --backing-image-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}"
+ - --support-bundle-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.supportBundleKit.repository }}:{{ .Values.image.longhorn.supportBundleKit.tag }}"
+ - --manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
+ - --service-account
+ - longhorn-service-account
+ ports:
+ - containerPort: 9500
+ name: manager
+ readinessProbe:
+ tcpSocket:
+ port: 9500
+ volumeMounts:
+ - name: dev
+ mountPath: /host/dev/
+ - name: proc
+ mountPath: /host/proc/
+ - name: longhorn
+ mountPath: /var/lib/longhorn/
+ mountPropagation: Bidirectional
+ - name: longhorn-grpc-tls
+ mountPath: /tls-files/
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumes:
+ - name: dev
+ hostPath:
+ path: /dev/
+ - name: proc
+ hostPath:
+ path: /proc/
+ - name: longhorn
+ hostPath:
+ path: /var/lib/longhorn/
+ - name: longhorn-grpc-tls
+ secret:
+ secretName: longhorn-grpc-tls
+ optional: true
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornManager.priorityClass }}
+ priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornManager.tolerations }}
+{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornManager.nodeSelector }}
+{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: "100%"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-manager
+ name: longhorn-backend
+ namespace: {{ include "release_namespace" . }}
+ {{- if .Values.longhornManager.serviceAnnotations }}
+ annotations:
+{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.manager.type }}
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-manager
+ ports:
+ - name: manager
+ port: 9500
+ targetPort: manager
+ {{- if .Values.service.manager.nodePort }}
+ nodePort: {{ .Values.service.manager.nodePort }}
+ {{- end }}
diff --git a/charts/longhorn-1.4.1/templates/default-setting.yaml b/charts/longhorn-1.4.1/templates/default-setting.yaml
new file mode 100644
index 0000000..49870a4
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/default-setting.yaml
@@ -0,0 +1,79 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: longhorn-default-setting
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+data:
+ default-setting.yaml: |-
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupTarget) }}backup-target: {{ .Values.defaultSettings.backupTarget }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupTargetCredentialSecret) }}backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.allowRecurringJobWhileVolumeDetached) }}allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.createDefaultDiskLabeledNodes) }}create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataPath) }}default-data-path: {{ .Values.defaultSettings.defaultDataPath }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaSoftAntiAffinity) }}replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaAutoBalance) }}replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.storageOverProvisioningPercentage) }}storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.storageMinimalAvailablePercentage) }}storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.upgradeChecker) }}upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultReplicaCount) }}default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }}default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }}default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }}backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }}failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.restoreVolumeRecurringJobs) }}restore-volume-recurring-jobs: {{ .Values.defaultSettings.restoreVolumeRecurringJobs }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit) }}recurring-successful-jobs-history-limit: {{ .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.recurringFailedJobsHistoryLimit) }}recurring-failed-jobs-history-limit: {{ .Values.defaultSettings.recurringFailedJobsHistoryLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.supportBundleFailedHistoryLimit) }}support-bundle-failed-history-limit: {{ .Values.defaultSettings.supportBundleFailedHistoryLimit }}{{ end }}
+ {{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }}
+ taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}}
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
+ {{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
+ {{- end -}}
+ {{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}}
+ {{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}}
+ {{- end -}}
+ {{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}}
+ {{- end }}
+ {{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }}
+ system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}}
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
+ {{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
+ {{- end -}}
+ {{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}}
+ {{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}}
+ {{- end -}}
+ {{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}}
+ {{- end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }}priority-class: {{ .Values.defaultSettings.priorityClass }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }}auto-salvage: {{ .Values.defaultSettings.autoSalvage }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }}auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.disableSchedulingOnCordonedNode) }}disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }}replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }}node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica) }}allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.mkfsExt4Parameters) }}mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.disableReplicaRebuild) }}disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }}replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit) }}concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit) }}concurrent-volume-backup-restore-per-node-limit: {{ .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.disableRevisionCounter) }}disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.systemManagedPodsImagePullPolicy) }}system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability) }}allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot) }}auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit) }}concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backingImageCleanupWaitInterval) }}backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backingImageRecoveryWaitInterval) }}backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.guaranteedEngineManagerCPU) }}guaranteed-engine-manager-cpu: {{ .Values.defaultSettings.guaranteedEngineManagerCPU }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.guaranteedReplicaManagerCPU) }}guaranteed-replica-manager-cpu: {{ .Values.defaultSettings.guaranteedReplicaManagerCPU }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.kubernetesClusterAutoscalerEnabled) }}kubernetes-cluster-autoscaler-enabled: {{ .Values.defaultSettings.kubernetesClusterAutoscalerEnabled }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.orphanAutoDeletion) }}orphan-auto-deletion: {{ .Values.defaultSettings.orphanAutoDeletion }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.storageNetwork) }}storage-network: {{ .Values.defaultSettings.storageNetwork }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.deletingConfirmationFlag) }}deleting-confirmation-flag: {{ .Values.defaultSettings.deletingConfirmationFlag }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.engineReplicaTimeout) }}engine-replica-timeout: {{ .Values.defaultSettings.engineReplicaTimeout }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrity) }}snapshot-data-integrity: {{ .Values.defaultSettings.snapshotDataIntegrity }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation) }}snapshot-data-integrity-immediate-check-after-snapshot-creation: {{ .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityCronjob) }}snapshot-data-integrity-cronjob: {{ .Values.defaultSettings.snapshotDataIntegrityCronjob }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim) }}remove-snapshots-during-filesystem-trim: {{ .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.fastReplicaRebuildEnabled) }}fast-replica-rebuild-enabled: {{ .Values.defaultSettings.fastReplicaRebuildEnabled }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaFileSyncHttpClientTimeout) }}replica-file-sync-http-client-timeout: {{ .Values.defaultSettings.replicaFileSyncHttpClientTimeout }}{{ end }}
\ No newline at end of file
diff --git a/charts/longhorn-1.4.1/templates/deployment-driver.yaml b/charts/longhorn-1.4.1/templates/deployment-driver.yaml
new file mode 100644
index 0000000..f162fbf
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/deployment-driver.yaml
@@ -0,0 +1,118 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: longhorn-driver-deployer
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: longhorn-driver-deployer
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-driver-deployer
+ spec:
+ initContainers:
+ - name: wait-longhorn-manager
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
+ containers:
+ - name: longhorn-driver-deployer
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - longhorn-manager
+ - -d
+ - deploy-driver
+ - --manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
+ - --manager-url
+ - http://longhorn-backend:9500/v1
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ {{- if .Values.csi.kubeletRootDir }}
+ - name: KUBELET_ROOT_DIR
+ value: {{ .Values.csi.kubeletRootDir }}
+ {{- end }}
+ {{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
+ - name: CSI_ATTACHER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
+ - name: CSI_PROVISIONER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
+ - name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
+ - name: CSI_RESIZER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
+ - name: CSI_SNAPSHOTTER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }}
+ - name: CSI_LIVENESS_PROBE_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}"
+ {{- end }}
+ {{- if .Values.csi.attacherReplicaCount }}
+ - name: CSI_ATTACHER_REPLICA_COUNT
+ value: {{ .Values.csi.attacherReplicaCount | quote }}
+ {{- end }}
+ {{- if .Values.csi.provisionerReplicaCount }}
+ - name: CSI_PROVISIONER_REPLICA_COUNT
+ value: {{ .Values.csi.provisionerReplicaCount | quote }}
+ {{- end }}
+ {{- if .Values.csi.resizerReplicaCount }}
+ - name: CSI_RESIZER_REPLICA_COUNT
+ value: {{ .Values.csi.resizerReplicaCount | quote }}
+ {{- end }}
+ {{- if .Values.csi.snapshotterReplicaCount }}
+ - name: CSI_SNAPSHOTTER_REPLICA_COUNT
+ value: {{ .Values.csi.snapshotterReplicaCount | quote }}
+ {{- end }}
+
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornDriver.priorityClass }}
+ priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornDriver.tolerations }}
+{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornDriver.nodeSelector }}
+{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ securityContext:
+ runAsUser: 0
diff --git a/charts/longhorn-1.4.1/templates/deployment-recovery-backend.yaml b/charts/longhorn-1.4.1/templates/deployment-recovery-backend.yaml
new file mode 100644
index 0000000..81c8aba
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/deployment-recovery-backend.yaml
@@ -0,0 +1,83 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-recovery-backend
+ name: longhorn-recovery-backend
+ namespace: {{ include "release_namespace" . }}
+spec:
+ replicas: {{ .Values.longhornRecoveryBackend.replicas }}
+ selector:
+ matchLabels:
+ app: longhorn-recovery-backend
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-recovery-backend
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - longhorn-recovery-backend
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: longhorn-recovery-backend
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ runAsUser: 2000
+ command:
+ - longhorn-manager
+ - recovery-backend
+ - --service-account
+ - longhorn-service-account
+ ports:
+ - containerPort: 9600
+ name: recov-backend
+ readinessProbe:
+ tcpSocket:
+ port: 9600
+ initialDelaySeconds: 3
+ periodSeconds: 5
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornRecoveryBackend.priorityClass }}
+ priorityClassName: {{ .Values.longhornRecoveryBackend.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornRecoveryBackend.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornRecoveryBackend.tolerations }}
+{{ toYaml .Values.longhornRecoveryBackend.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornRecoveryBackend.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornRecoveryBackend.nodeSelector }}
+{{ toYaml .Values.longhornRecoveryBackend.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
diff --git a/charts/longhorn-1.4.1/templates/deployment-ui.yaml b/charts/longhorn-1.4.1/templates/deployment-ui.yaml
new file mode 100644
index 0000000..6bad5cd
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/deployment-ui.yaml
@@ -0,0 +1,114 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-ui
+ name: longhorn-ui
+ namespace: {{ include "release_namespace" . }}
+spec:
+ replicas: {{ .Values.longhornUI.replicas }}
+ selector:
+ matchLabels:
+ app: longhorn-ui
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-ui
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - longhorn-ui
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: longhorn-ui
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ volumeMounts:
+        - name: nginx-cache
+ mountPath: /var/cache/nginx/
+        - name: nginx-config
+ mountPath: /var/config/nginx/
+ - name: var-run
+ mountPath: /var/run/
+ ports:
+ - containerPort: 8000
+ name: http
+ env:
+ - name: LONGHORN_MANAGER_IP
+ value: "http://longhorn-backend:9500"
+ - name: LONGHORN_UI_PORT
+ value: "8000"
+ volumes:
+ - emptyDir: {}
+ name: nginx-cache
+ - emptyDir: {}
+ name: nginx-config
+ - emptyDir: {}
+ name: var-run
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornUI.priorityClass }}
+ priorityClassName: {{ .Values.longhornUI.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornUI.tolerations }}
+{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornUI.nodeSelector }}
+{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+---
+kind: Service
+apiVersion: v1
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-ui
+ {{- if eq .Values.service.ui.type "Rancher-Proxy" }}
+ kubernetes.io/cluster-service: "true"
+ {{- end }}
+ name: longhorn-frontend
+ namespace: {{ include "release_namespace" . }}
+spec:
+ {{- if eq .Values.service.ui.type "Rancher-Proxy" }}
+ type: ClusterIP
+ {{- else }}
+ type: {{ .Values.service.ui.type }}
+ {{- end }}
+ {{- if and .Values.service.ui.loadBalancerIP (eq .Values.service.ui.type "LoadBalancer") }}
+ loadBalancerIP: {{ .Values.service.ui.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.service.ui.type "LoadBalancer") .Values.service.ui.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{- toYaml .Values.service.ui.loadBalancerSourceRanges | nindent 4 }}
+ {{- end }}
+ selector:
+ app: longhorn-ui
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ {{- if .Values.service.ui.nodePort }}
+ nodePort: {{ .Values.service.ui.nodePort }}
+ {{- else }}
+ nodePort: null
+ {{- end }}
diff --git a/charts/longhorn-1.4.1/templates/deployment-webhook.yaml b/charts/longhorn-1.4.1/templates/deployment-webhook.yaml
new file mode 100644
index 0000000..c4d353a
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/deployment-webhook.yaml
@@ -0,0 +1,166 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-conversion-webhook
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ replicas: {{ .Values.longhornConversionWebhook.replicas }}
+ selector:
+ matchLabels:
+ app: longhorn-conversion-webhook
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-conversion-webhook
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - longhorn-conversion-webhook
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: longhorn-conversion-webhook
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ runAsUser: 2000
+ command:
+ - longhorn-manager
+ - conversion-webhook
+ - --service-account
+ - longhorn-service-account
+ ports:
+ - containerPort: 9443
+ name: conversion-wh
+ readinessProbe:
+ tcpSocket:
+ port: 9443
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornConversionWebhook.priorityClass }}
+ priorityClassName: {{ .Values.longhornConversionWebhook.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornConversionWebhook.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornConversionWebhook.tolerations }}
+{{ toYaml .Values.longhornConversionWebhook.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornConversionWebhook.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornConversionWebhook.nodeSelector }}
+{{ toYaml .Values.longhornConversionWebhook.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-admission-webhook
+ name: longhorn-admission-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ replicas: {{ .Values.longhornAdmissionWebhook.replicas }}
+ selector:
+ matchLabels:
+ app: longhorn-admission-webhook
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-admission-webhook
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - longhorn-admission-webhook
+ topologyKey: kubernetes.io/hostname
+ initContainers:
+ - name: wait-longhorn-conversion-webhook
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" -k https://longhorn-conversion-webhook:9443/v1/healthz) != "200" ]; do echo waiting; sleep 2; done']
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ runAsUser: 2000
+ containers:
+ - name: longhorn-admission-webhook
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ runAsUser: 2000
+ command:
+ - longhorn-manager
+ - admission-webhook
+ - --service-account
+ - longhorn-service-account
+ ports:
+ - containerPort: 9443
+ name: admission-wh
+ readinessProbe:
+ tcpSocket:
+ port: 9443
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornAdmissionWebhook.priorityClass }}
+ priorityClassName: {{ .Values.longhornAdmissionWebhook.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornAdmissionWebhook.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornAdmissionWebhook.tolerations }}
+{{ toYaml .Values.longhornAdmissionWebhook.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornAdmissionWebhook.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornAdmissionWebhook.nodeSelector }}
+{{ toYaml .Values.longhornAdmissionWebhook.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
diff --git a/charts/longhorn-1.4.1/templates/ingress.yaml b/charts/longhorn-1.4.1/templates/ingress.yaml
new file mode 100644
index 0000000..ee47f8b
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/ingress.yaml
@@ -0,0 +1,48 @@
+{{- if .Values.ingress.enabled }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: longhorn-ingress
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-ingress
+ annotations:
+ {{- if .Values.ingress.secureBackends }}
+ ingress.kubernetes.io/secure-backends: "true"
+ {{- end }}
+ {{- range $key, $value := .Values.ingress.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ {{- if and .Values.ingress.ingressClassName (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+ ingressClassName: {{ .Values.ingress.ingressClassName }}
+ {{- end }}
+ rules:
+ - host: {{ .Values.ingress.host }}
+ http:
+ paths:
+ - path: {{ default "" .Values.ingress.path }}
+ {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+ pathType: ImplementationSpecific
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: longhorn-frontend
+ port:
+ number: 80
+ {{- else }}
+ serviceName: longhorn-frontend
+ servicePort: 80
+ {{- end }}
+{{- if .Values.ingress.tls }}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.host }}
+ secretName: {{ .Values.ingress.tlsSecret }}
+{{- end }}
+{{- end }}
diff --git a/charts/longhorn-1.4.1/templates/postupgrade-job.yaml b/charts/longhorn-1.4.1/templates/postupgrade-job.yaml
new file mode 100644
index 0000000..b9b2eeb
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/postupgrade-job.yaml
@@ -0,0 +1,58 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": post-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
+ name: longhorn-post-upgrade
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ activeDeadlineSeconds: 900
+ backoffLimit: 1
+ template:
+ metadata:
+ name: longhorn-post-upgrade
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: longhorn-post-upgrade
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: true
+ command:
+ - longhorn-manager
+ - post-upgrade
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ restartPolicy: OnFailure
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornManager.priorityClass }}
+ priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornManager.tolerations }}
+{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornManager.nodeSelector }}
+{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/longhorn-1.4.1/templates/psp.yaml b/charts/longhorn-1.4.1/templates/psp.yaml
new file mode 100644
index 0000000..a2dfc05
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/psp.yaml
@@ -0,0 +1,66 @@
+{{- if .Values.enablePSP }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: longhorn-psp
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ requiredDropCapabilities:
+ - NET_RAW
+ allowedCapabilities:
+ - SYS_ADMIN
+ hostNetwork: false
+ hostIPC: false
+ hostPID: true
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ fsGroup:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - configMap
+ - downwardAPI
+ - emptyDir
+ - secret
+ - projected
+ - hostPath
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: longhorn-psp-role
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ namespace: {{ include "release_namespace" . }}
+rules:
+- apiGroups:
+ - policy
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ resourceNames:
+ - longhorn-psp
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: longhorn-psp-binding
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ namespace: {{ include "release_namespace" . }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: longhorn-psp-role
+subjects:
+- kind: ServiceAccount
+ name: longhorn-service-account
+ namespace: {{ include "release_namespace" . }}
+- kind: ServiceAccount
+ name: default
+ namespace: {{ include "release_namespace" . }}
+{{- end }}
diff --git a/charts/longhorn-1.4.1/templates/registry-secret.yaml b/charts/longhorn-1.4.1/templates/registry-secret.yaml
new file mode 100644
index 0000000..3c6b1dc
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/registry-secret.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.privateRegistry.createSecret }}
+{{- if .Values.privateRegistry.registrySecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.privateRegistry.registrySecret }}
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ template "secret" . }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/longhorn-1.4.1/templates/serviceaccount.yaml b/charts/longhorn-1.4.1/templates/serviceaccount.yaml
new file mode 100644
index 0000000..a563d68
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/serviceaccount.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: longhorn-service-account
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: longhorn-support-bundle
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
\ No newline at end of file
diff --git a/charts/longhorn-1.4.1/templates/services.yaml b/charts/longhorn-1.4.1/templates/services.yaml
new file mode 100644
index 0000000..cd008db
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/services.yaml
@@ -0,0 +1,74 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-conversion-webhook
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ type: ClusterIP
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-conversion-webhook
+ ports:
+ - name: conversion-webhook
+ port: 9443
+ targetPort: conversion-wh
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-admission-webhook
+ name: longhorn-admission-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ type: ClusterIP
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-admission-webhook
+ ports:
+ - name: admission-webhook
+ port: 9443
+ targetPort: admission-wh
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-recovery-backend
+ name: longhorn-recovery-backend
+ namespace: {{ include "release_namespace" . }}
+spec:
+ type: ClusterIP
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-recovery-backend
+ ports:
+ - name: recovery-backend
+ port: 9600
+ targetPort: recov-backend
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ name: longhorn-engine-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ clusterIP: None
+ selector:
+ longhorn.io/component: instance-manager
+ longhorn.io/instance-manager-type: engine
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ name: longhorn-replica-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ clusterIP: None
+ selector:
+ longhorn.io/component: instance-manager
+ longhorn.io/instance-manager-type: replica
diff --git a/charts/longhorn-1.4.1/templates/storageclass.yaml b/charts/longhorn-1.4.1/templates/storageclass.yaml
new file mode 100644
index 0000000..6832517
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/storageclass.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: longhorn-storageclass
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+data:
+ storageclass.yaml: |
+ kind: StorageClass
+ apiVersion: storage.k8s.io/v1
+ metadata:
+ name: longhorn
+ annotations:
+ storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
+ provisioner: driver.longhorn.io
+ allowVolumeExpansion: true
+ reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
+ volumeBindingMode: Immediate
+ parameters:
+ numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
+ staleReplicaTimeout: "30"
+ fromBackup: ""
+ {{- if .Values.persistence.defaultFsType }}
+ fsType: "{{ .Values.persistence.defaultFsType }}"
+ {{- end }}
+ {{- if .Values.persistence.defaultMkfsParams }}
+ mkfsParams: "{{ .Values.persistence.defaultMkfsParams }}"
+ {{- end }}
+ {{- if .Values.persistence.migratable }}
+ migratable: "{{ .Values.persistence.migratable }}"
+ {{- end }}
+ {{- if .Values.persistence.backingImage.enable }}
+ backingImage: {{ .Values.persistence.backingImage.name }}
+ backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }}
+ backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }}
+ backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }}
+ {{- end }}
+ {{- if .Values.persistence.recurringJobSelector.enable }}
+ recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}'
+ {{- end }}
+ dataLocality: {{ .Values.persistence.defaultDataLocality | quote }}
+ {{- if .Values.persistence.defaultNodeSelector.enable }}
+ nodeSelector: "{{ .Values.persistence.defaultNodeSelector.selector }}"
+ {{- end }}
diff --git a/charts/longhorn-1.4.1/templates/tls-secrets.yaml b/charts/longhorn-1.4.1/templates/tls-secrets.yaml
new file mode 100644
index 0000000..74c4342
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/tls-secrets.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .name }}
+ namespace: {{ include "release_namespace" $ }}
+ labels: {{- include "longhorn.labels" $ | nindent 4 }}
+ app: longhorn
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .certificate | b64enc }}
+ tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
diff --git a/charts/longhorn-1.4.1/templates/uninstall-job.yaml b/charts/longhorn-1.4.1/templates/uninstall-job.yaml
new file mode 100644
index 0000000..989933d
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/uninstall-job.yaml
@@ -0,0 +1,59 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": pre-delete
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ name: longhorn-uninstall
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ activeDeadlineSeconds: 900
+ backoffLimit: 1
+ template:
+ metadata:
+ name: longhorn-uninstall
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: longhorn-uninstall
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: true
+ command:
+ - longhorn-manager
+ - uninstall
+ - --force
+ env:
+ - name: LONGHORN_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ restartPolicy: Never
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornManager.priorityClass }}
+ priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornManager.tolerations }}
+{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornManager.nodeSelector }}
+{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/longhorn-1.4.1/templates/validate-psp-install.yaml b/charts/longhorn-1.4.1/templates/validate-psp-install.yaml
new file mode 100644
index 0000000..0df98e3
--- /dev/null
+++ b/charts/longhorn-1.4.1/templates/validate-psp-install.yaml
@@ -0,0 +1,7 @@
+#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
+#{{- if .Values.enablePSP }}
+#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
+#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
+#{{- end }}
+#{{- end }}
+#{{- end }}
\ No newline at end of file
diff --git a/charts/longhorn-1.4.1/values.yaml b/charts/longhorn-1.4.1/values.yaml
new file mode 100644
index 0000000..3ded6cd
--- /dev/null
+++ b/charts/longhorn-1.4.1/values.yaml
@@ -0,0 +1,332 @@
+# Default values for longhorn.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+global:
+ cattle:
+ systemDefaultRegistry: ""
+ windowsCluster:
+ # Enable this to allow Longhorn to run on the Rancher deployed Windows cluster
+ enabled: false
+ # Tolerate Linux node taint
+ tolerations:
+ - key: "cattle.io/os"
+ value: "linux"
+ effect: "NoSchedule"
+ operator: "Equal"
+ # Select Linux nodes
+ nodeSelector:
+ kubernetes.io/os: "linux"
+ # Recognize toleration and node selector for Longhorn run-time created components
+ defaultSetting:
+ taintToleration: cattle.io/os=linux:NoSchedule
+ systemManagedComponentsNodeSelector: kubernetes.io/os:linux
+
+image:
+ longhorn:
+ engine:
+ repository: longhornio/longhorn-engine
+ tag: v1.4.1
+ manager:
+ repository: longhornio/longhorn-manager
+ tag: v1.4.1
+ ui:
+ repository: longhornio/longhorn-ui
+ tag: v1.4.1
+ instanceManager:
+ repository: longhornio/longhorn-instance-manager
+ tag: v1.4.1
+ shareManager:
+ repository: longhornio/longhorn-share-manager
+ tag: v1.4.1
+ backingImageManager:
+ repository: longhornio/backing-image-manager
+ tag: v1.4.1
+ supportBundleKit:
+ repository: longhornio/support-bundle-kit
+ tag: v0.0.19
+ csi:
+ attacher:
+ repository: longhornio/csi-attacher
+ tag: v3.4.0
+ provisioner:
+ repository: longhornio/csi-provisioner
+ tag: v2.1.2
+ nodeDriverRegistrar:
+ repository: longhornio/csi-node-driver-registrar
+ tag: v2.5.0
+ resizer:
+ repository: longhornio/csi-resizer
+ tag: v1.3.0
+ snapshotter:
+ repository: longhornio/csi-snapshotter
+ tag: v5.0.1
+ livenessProbe:
+ repository: longhornio/livenessprobe
+ tag: v2.8.0
+ pullPolicy: IfNotPresent
+
+service:
+ ui:
+ type: ClusterIP
+ nodePort: null
+ manager:
+ type: ClusterIP
+ nodePort: ""
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: ""
+
+persistence:
+ defaultClass: true
+ defaultFsType: ext4
+ defaultMkfsParams: ""
+ defaultClassReplicaCount: 3
+ defaultDataLocality: disabled # best-effort otherwise
+ reclaimPolicy: Delete
+ migratable: false
+ recurringJobSelector:
+ enable: false
+ jobList: []
+ backingImage:
+ enable: false
+ name: ~
+ dataSourceType: ~
+ dataSourceParameters: ~
+ expectedChecksum: ~
+ defaultNodeSelector:
+ enable: false # disable by default
+ selector: []
+ removeSnapshotsDuringFilesystemTrim: ignored # "enabled" or "disabled" otherwise
+
+csi:
+ kubeletRootDir: ~
+ attacherReplicaCount: ~
+ provisionerReplicaCount: ~
+ resizerReplicaCount: ~
+ snapshotterReplicaCount: ~
+
+defaultSettings:
+ backupTarget: ~
+ backupTargetCredentialSecret: ~
+ allowRecurringJobWhileVolumeDetached: ~
+ createDefaultDiskLabeledNodes: ~
+ defaultDataPath: ~
+ defaultDataLocality: ~
+ replicaSoftAntiAffinity: ~
+ replicaAutoBalance: ~
+ storageOverProvisioningPercentage: ~
+ storageMinimalAvailablePercentage: ~
+ upgradeChecker: ~
+ defaultReplicaCount: ~
+ defaultLonghornStaticStorageClass: ~
+ backupstorePollInterval: ~
+ failedBackupTTL: ~
+ restoreVolumeRecurringJobs: ~
+ recurringSuccessfulJobsHistoryLimit: ~
+ recurringFailedJobsHistoryLimit: ~
+ supportBundleFailedHistoryLimit: ~
+ taintToleration: ~
+ systemManagedComponentsNodeSelector: ~
+ priorityClass: ~
+ autoSalvage: ~
+ autoDeletePodWhenVolumeDetachedUnexpectedly: ~
+ disableSchedulingOnCordonedNode: ~
+ replicaZoneSoftAntiAffinity: ~
+ nodeDownPodDeletionPolicy: ~
+ allowNodeDrainWithLastHealthyReplica: ~
+ mkfsExt4Parameters: ~
+ disableReplicaRebuild: ~
+ replicaReplenishmentWaitInterval: ~
+ concurrentReplicaRebuildPerNodeLimit: ~
+ concurrentVolumeBackupRestorePerNodeLimit: ~
+ disableRevisionCounter: ~
+ systemManagedPodsImagePullPolicy: ~
+ allowVolumeCreationWithDegradedAvailability: ~
+ autoCleanupSystemGeneratedSnapshot: ~
+ concurrentAutomaticEngineUpgradePerNodeLimit: ~
+ backingImageCleanupWaitInterval: ~
+ backingImageRecoveryWaitInterval: ~
+ guaranteedEngineManagerCPU: ~
+ guaranteedReplicaManagerCPU: ~
+ kubernetesClusterAutoscalerEnabled: ~
+ orphanAutoDeletion: ~
+ storageNetwork: ~
+ deletingConfirmationFlag: ~
+ engineReplicaTimeout: ~
+ snapshotDataIntegrity: ~
+ snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
+ snapshotDataIntegrityCronjob: ~
+ removeSnapshotsDuringFilesystemTrim: ~
+ fastReplicaRebuildEnabled: ~
+ replicaFileSyncHttpClientTimeout: ~
+privateRegistry:
+ createSecret: ~
+ registryUrl: ~
+ registryUser: ~
+ registryPasswd: ~
+ registrySecret: ~
+
+longhornManager:
+ log:
+ ## Allowed values are `plain` or `json`.
+ format: plain
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+ serviceAnnotations: {}
+ ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
+ ## and uncomment this example block
+ # annotation-key1: "annotation-value1"
+ # annotation-key2: "annotation-value2"
+
+longhornDriver:
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+
+longhornUI:
+ replicas: 2
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+
+longhornConversionWebhook:
+ replicas: 2
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn conversion webhook Deployment, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn conversion webhook Deployment, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+
+longhornAdmissionWebhook:
+ replicas: 2
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn admission webhook Deployment, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn admission webhook Deployment, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+
+longhornRecoveryBackend:
+ replicas: 2
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn recovery backend Deployment, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn recovery backend Deployment, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+
+ingress:
+ ## Set to true to enable ingress record generation
+ enabled: false
+
+ ## Add ingressClassName to the Ingress
+ ## Can replace the kubernetes.io/ingress.class annotation on v1.18+
+ ingressClassName: ~
+
+ host: sslip.io
+
+ ## Set this to true in order to enable TLS on the ingress record
+ tls: false
+
+ ## Enable this in order to enable that the backend service will be connected at port 443
+ secureBackends: false
+
+ ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+ tlsSecret: longhorn.local-tls
+
+ ## If ingress is enabled you can set the default ingress path
+ ## then you can access the UI by using the following full path {{host}}+{{path}}
+ path: /
+
+ ## Ingress annotations done as key:value pairs
+ ## If you're using kube-lego, you will want to add:
+ ## kubernetes.io/tls-acme: true
+ ##
+ ## For a full list of possible ingress annotations, please see
+ ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
+ ##
+ ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+ annotations:
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: true
+
+ secrets:
+ ## If you're providing your own certificates, please use this to add the certificates as secrets
+ ## key and certificate should start with -----BEGIN CERTIFICATE----- or
+ ## -----BEGIN RSA PRIVATE KEY-----
+ ##
+ ## name should line up with a tlsSecret set further up
+ ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
+ ##
+ ## It is also possible to create and manage the certificates outside of this helm chart
+ ## Please see README.md for more information
+ # - name: longhorn.local-tls
+ # key:
+ # certificate:
+
+# For Kubernetes < v1.25, if your cluster enables Pod Security Policy admission controller,
+# set this to `true` to ship longhorn-psp which allow privileged Longhorn pods to start
+enablePSP: false
+
+## Specify override namespace, specifically this is useful for using longhorn as sub-chart
+## and its release namespace is not the `longhorn-system`
+namespaceOverride: ""
+
+# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
+annotations: {}
+
+serviceAccount:
+ # Annotations to add to the service account
+ annotations: {}
diff --git a/charts/longhorn/.helmignore b/charts/longhorn/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/longhorn/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/longhorn/Chart.yaml b/charts/longhorn/Chart.yaml
new file mode 100644
index 0000000..1609eb5
--- /dev/null
+++ b/charts/longhorn/Chart.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+appVersion: v1.5.2
+description: Longhorn is a distributed block storage system for Kubernetes.
+home: https://github.com/longhorn/longhorn
+icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png
+keywords:
+- longhorn
+- storage
+- distributed
+- block
+- device
+- iscsi
+- nfs
+kubeVersion: '>=1.21.0-0'
+maintainers:
+- email: maintainers@longhorn.io
+ name: Longhorn maintainers
+name: longhorn
+sources:
+- https://github.com/longhorn/longhorn
+- https://github.com/longhorn/longhorn-engine
+- https://github.com/longhorn/longhorn-instance-manager
+- https://github.com/longhorn/longhorn-share-manager
+- https://github.com/longhorn/longhorn-manager
+- https://github.com/longhorn/longhorn-ui
+- https://github.com/longhorn/longhorn-tests
+- https://github.com/longhorn/backing-image-manager
+version: 1.5.2
diff --git a/charts/longhorn/README.md b/charts/longhorn/README.md
new file mode 100644
index 0000000..012c058
--- /dev/null
+++ b/charts/longhorn/README.md
@@ -0,0 +1,78 @@
+# Longhorn Chart
+
+> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.
+
+> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
+
+## Source Code
+
+Longhorn is 100% open source software. Project source code is spread across a number of repos:
+
+1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
+2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
+3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
+4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
+5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
+6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui
+
+## Prerequisites
+
+1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
+2. Kubernetes >= v1.21
+3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed in all nodes of the Kubernetes cluster.
+4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image since it already contains `open-iscsi`.
+
+## Upgrading to Kubernetes v1.25+
+
+Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
+
+As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.
+
+> **Note:**
+> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
+>
+> If your charts get stuck in this state, you may have to clean up your Helm release secrets.
+Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
+
+As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
+
+## Installation
+1. Add Longhorn chart repository.
+```
+helm repo add longhorn https://charts.longhorn.io
+```
+
+2. Update local Longhorn chart information from chart repository.
+```
+helm repo update
+```
+
+3. Install Longhorn chart.
+- With Helm 2, the following command will create the `longhorn-system` namespace and install the Longhorn chart together.
+```
+helm install longhorn/longhorn --name longhorn --namespace longhorn-system
+```
+- With Helm 3, the following commands will create the `longhorn-system` namespace first, then install the Longhorn chart.
+
+```
+kubectl create namespace longhorn-system
+helm install longhorn longhorn/longhorn --namespace longhorn-system
+```
+
+## Uninstallation
+
+With Helm 2 to uninstall Longhorn.
+```
+kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
+helm delete longhorn --purge
+```
+
+With Helm 3 to uninstall Longhorn.
+```
+kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
+helm uninstall longhorn -n longhorn-system
+kubectl delete namespace longhorn-system
+```
+
+---
+Please see [link](https://github.com/longhorn/longhorn) for more information.
diff --git a/charts/longhorn/app-readme.md b/charts/longhorn/app-readme.md
new file mode 100644
index 0000000..cb23135
--- /dev/null
+++ b/charts/longhorn/app-readme.md
@@ -0,0 +1,11 @@
+# Longhorn
+
+Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn.
+
+Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups!
+
+**Important**: Please install Longhorn chart in `longhorn-system` namespace only.
+
+**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.
+
+[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md)
diff --git a/charts/longhorn/questions.yaml b/charts/longhorn/questions.yaml
new file mode 100644
index 0000000..b53b0fe
--- /dev/null
+++ b/charts/longhorn/questions.yaml
@@ -0,0 +1,890 @@
+categories:
+- storage
+namespace: longhorn-system
+questions:
+- variable: image.defaultImage
+ default: "true"
+ description: "Use default Longhorn images"
+ label: Use Default Images
+ type: boolean
+ show_subquestion_if: false
+ group: "Longhorn Images"
+ subquestions:
+ - variable: image.longhorn.manager.repository
+ default: longhornio/longhorn-manager
+ description: "Specify Longhorn Manager Image Repository"
+ type: string
+ label: Longhorn Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.manager.tag
+ default: v1.5.2
+ description: "Specify Longhorn Manager Image Tag"
+ type: string
+ label: Longhorn Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.engine.repository
+ default: longhornio/longhorn-engine
+ description: "Specify Longhorn Engine Image Repository"
+ type: string
+ label: Longhorn Engine Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.engine.tag
+ default: v1.5.2
+ description: "Specify Longhorn Engine Image Tag"
+ type: string
+ label: Longhorn Engine Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.ui.repository
+ default: longhornio/longhorn-ui
+ description: "Specify Longhorn UI Image Repository"
+ type: string
+ label: Longhorn UI Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.ui.tag
+ default: v1.5.2
+ description: "Specify Longhorn UI Image Tag"
+ type: string
+ label: Longhorn UI Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.instanceManager.repository
+ default: longhornio/longhorn-instance-manager
+ description: "Specify Longhorn Instance Manager Image Repository"
+ type: string
+ label: Longhorn Instance Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.instanceManager.tag
+ default: v1.5.2
+ description: "Specify Longhorn Instance Manager Image Tag"
+ type: string
+ label: Longhorn Instance Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.shareManager.repository
+ default: longhornio/longhorn-share-manager
+ description: "Specify Longhorn Share Manager Image Repository"
+ type: string
+ label: Longhorn Share Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.shareManager.tag
+ default: v1.5.2
+ description: "Specify Longhorn Share Manager Image Tag"
+ type: string
+ label: Longhorn Share Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.backingImageManager.repository
+ default: longhornio/backing-image-manager
+ description: "Specify Longhorn Backing Image Manager Image Repository"
+ type: string
+ label: Longhorn Backing Image Manager Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.backingImageManager.tag
+ default: v1.5.2
+ description: "Specify Longhorn Backing Image Manager Image Tag"
+ type: string
+ label: Longhorn Backing Image Manager Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.supportBundleKit.repository
+ default: longhornio/support-bundle-kit
+ description: "Specify Longhorn Support Bundle Manager Image Repository"
+ type: string
+ label: Longhorn Support Bundle Kit Image Repository
+ group: "Longhorn Images Settings"
+ - variable: image.longhorn.supportBundleKit.tag
+ default: v0.0.27
+ description: "Specify Longhorn Support Bundle Manager Image Tag"
+ type: string
+ label: Longhorn Support Bundle Kit Image Tag
+ group: "Longhorn Images Settings"
+ - variable: image.csi.attacher.repository
+ default: longhornio/csi-attacher
+ description: "Specify CSI attacher image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Attacher Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.attacher.tag
+ default: v4.2.0
+ description: "Specify CSI attacher image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Attacher Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.provisioner.repository
+ default: longhornio/csi-provisioner
+ description: "Specify CSI provisioner image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Provisioner Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.provisioner.tag
+ default: v3.4.1
+ description: "Specify CSI provisioner image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Provisioner Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.nodeDriverRegistrar.repository
+ default: longhornio/csi-node-driver-registrar
+ description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Node Driver Registrar Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.nodeDriverRegistrar.tag
+ default: v2.7.0
+ description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Node Driver Registrar Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.resizer.repository
+ default: longhornio/csi-resizer
+ description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Resizer Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.resizer.tag
+ default: v1.7.0
+ description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Resizer Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.snapshotter.repository
+ default: longhornio/csi-snapshotter
+ description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Snapshotter Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.snapshotter.tag
+ default: v6.2.1
+ description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Driver Snapshotter Image Tag
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.livenessProbe.repository
+ default: longhornio/livenessprobe
+ description: "Specify CSI liveness probe image repository. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Liveness Probe Image Repository
+ group: "Longhorn CSI Driver Images"
+ - variable: image.csi.livenessProbe.tag
+ default: v2.9.0
+ description: "Specify CSI liveness probe image tag. Leave blank to autodetect."
+ type: string
+ label: Longhorn CSI Liveness Probe Image Tag
+ group: "Longhorn CSI Driver Images"
+- variable: privateRegistry.registryUrl
+ label: Private registry URL
+ description: "URL of private registry. Leave blank to apply system default registry."
+ group: "Private Registry Settings"
+ type: string
+ default: ""
+- variable: privateRegistry.registrySecret
+ label: Private registry secret name
+ description: "If 'Create a new private registry secret' is true, a Kubernetes secret is created with this name; otherwise the existing secret of this name is used. Use it to pull images from your private registry."
+ group: "Private Registry Settings"
+ type: string
+ default: ""
+- variable: privateRegistry.createSecret
+ default: "true"
+ description: "Create a new private registry secret"
+ type: boolean
+ group: "Private Registry Settings"
+ label: Create Secret for Private Registry Settings
+ show_subquestion_if: true
+ subquestions:
+ - variable: privateRegistry.registryUser
+ label: Private registry user
+ description: "User used to authenticate to private registry."
+ type: string
+ default: ""
+ - variable: privateRegistry.registryPasswd
+ label: Private registry password
+ description: "Password used to authenticate to private registry."
+ type: password
+ default: ""
+- variable: longhorn.default_setting
+ default: "false"
+ description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn."
+ label: "Customize Default Settings"
+ type: boolean
+ show_subquestion_if: true
+ group: "Longhorn Default Settings"
+ subquestions:
+ - variable: csi.kubeletRootDir
+ default:
+ description: "Specify kubelet root-dir. Leave blank to autodetect."
+ type: string
+ label: Kubelet Root Directory
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.attacherReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Attacher. By default 3."
+ label: Longhorn CSI Attacher replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.provisionerReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Provisioner. By default 3."
+ label: Longhorn CSI Provisioner replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.resizerReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Resizer. By default 3."
+ label: Longhorn CSI Resizer replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: csi.snapshotterReplicaCount
+ type: int
+ default: 3
+ min: 1
+ max: 10
+ description: "Specify replica count of CSI Snapshotter. By default 3."
+ label: Longhorn CSI Snapshotter replica count
+ group: "Longhorn CSI Driver Settings"
+ - variable: defaultSettings.backupTarget
+ label: Backup Target
+ description: "The endpoint used to access the backupstore. NFS and S3 are supported."
+ group: "Longhorn Default Settings"
+ type: string
+ default:
+ - variable: defaultSettings.backupTargetCredentialSecret
+ label: Backup Target Credential Secret
+ description: "The name of the Kubernetes secret associated with the backup target."
+ group: "Longhorn Default Settings"
+ type: string
+ default:
+ - variable: defaultSettings.allowRecurringJobWhileVolumeDetached
+ label: Allow Recurring Job While Volume Is Detached
+ description: 'If this setting is enabled, Longhorn will automatically attach the volume and take a snapshot/backup when it is time to do a recurring snapshot/backup.
+Note that the volume is not ready for workload during the period when the volume is automatically attached. Workload will have to wait until the recurring job finishes.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.createDefaultDiskLabeledNodes
+ label: Create Default Disk on Labeled Nodes
+ description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.defaultDataPath
+ label: Default Data Path
+ description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"'
+ group: "Longhorn Default Settings"
+ type: string
+ default: "/var/lib/longhorn/"
+ - variable: defaultSettings.defaultDataLocality
+ label: Default Data Locality
+ description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
+This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass
+The available modes are:
+- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload)
+- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.'
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "disabled"
+ - "best-effort"
+ default: "disabled"
+ - variable: defaultSettings.replicaSoftAntiAffinity
+ label: Replica Node Level Soft Anti-Affinity
+ description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.replicaAutoBalance
+ label: Replica Auto Balance
+ description: 'Enabling this setting automatically rebalances replicas when an available node is discovered.
+The available global options are:
+- **disabled**. This is the default option. No replica auto-balance will be done.
+- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
+- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.
+Longhorn also supports an individual volume setting. The setting can be specified in volume.spec.replicaAutoBalance; this overrules the global setting.
+The available volume spec options are:
+- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
+- **disabled**. This option instructs Longhorn no replica auto-balance should be done.
+- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
+- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.'
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "disabled"
+ - "least-effort"
+ - "best-effort"
+ default: "disabled"
+ - variable: defaultSettings.storageOverProvisioningPercentage
+ label: Storage Over Provisioning Percentage
+ description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 200
+ - variable: defaultSettings.storageMinimalAvailablePercentage
+ label: Storage Minimal Available Percentage
+ description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ max: 100
+ default: 25
+ - variable: defaultSettings.storageReservedPercentageForDefaultDisk
+ label: Storage Reserved Percentage For Default Disk
+ description: "The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ max: 100
+ default: 30
+ - variable: defaultSettings.upgradeChecker
+ label: Enable Upgrade Checker
+ description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.defaultReplicaCount
+ label: Default Replica Count
+ description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 1
+ max: 20
+ default: 3
+ - variable: defaultSettings.defaultLonghornStaticStorageClass
+ label: Default Longhorn Static StorageClass Name
+ description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "longhorn-static"
+ - variable: defaultSettings.backupstorePollInterval
+ label: Backupstore Poll Interval
+ description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 300
+ - variable: defaultSettings.failedBackupTTL
+ label: Failed Backup Time to Live
+ description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion.
+Failed backups will be checked and cleaned up during backupstore polling which is controlled by **Backupstore Poll Interval** setting.
+Hence this value determines the minimal wait interval of the cleanup. And the actual cleanup interval is multiple of **Backupstore Poll Interval**.
+Disabling **Backupstore Poll Interval** also means to disable failed backup auto-deletion."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1440
+ - variable: defaultSettings.restoreVolumeRecurringJobs
+ label: Restore Volume Recurring Jobs
+ description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if they do not exist during a backup restoration.
+Longhorn also supports an individual volume setting. The setting can be specified on the Backup page when making a backup restoration; this overrules the global setting.
+The available volume setting options are:
+- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
+- **enabled**. This option instructs Longhorn to restore recurring jobs/groups from the backup target forcibly.
+- **disabled**. This option instructs Longhorn that no recurring jobs/groups should be restored."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.recurringSuccessfulJobsHistoryLimit
+ label: Cronjob Successful Jobs History Limit
+ description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1
+ - variable: defaultSettings.recurringFailedJobsHistoryLimit
+ label: Cronjob Failed Jobs History Limit
+ description: "This setting specifies how many failed backup or snapshot job histories should be retained. History will not be retained if the value is 0."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1
+ - variable: defaultSettings.supportBundleFailedHistoryLimit
+ label: SupportBundle Failed History Limit
+ description: "This setting specifies how many failed support bundles can exist in the cluster.
+The retained failed support bundle is for analysis purposes and needs to be cleaned up manually.
+Set this value to **0** to have Longhorn automatically purge all failed support bundles."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 1
+ - variable: defaultSettings.autoSalvage
+ label: Automatic salvage
+ description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
+ label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
+ description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
+If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume.
+**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.'
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.disableSchedulingOnCordonedNode
+ label: Disable Scheduling On Cordoned Node
+ description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.replicaZoneSoftAntiAffinity
+ label: Replica Zone Level Soft Anti-Affinity
+ description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes that don't belong to any Zone will be treated as being in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By default true."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.nodeDownPodDeletionPolicy
+ label: Pod Deletion Policy When Node is Down
+ description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
+- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.
+- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
+- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
+- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods."
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "do-nothing"
+ - "delete-statefulset-pod"
+ - "delete-deployment-pod"
+ - "delete-both-statefulset-and-deployment-pod"
+ default: "do-nothing"
+ - variable: defaultSettings.nodeDrainPolicy
+ label: Node Drain Policy
+ description: "Define the policy to use when a node with the last healthy replica of a volume is drained.
+- **block-if-contains-last-replica** Longhorn will block the drain when the node contains the last healthy replica of a volume.
+- **allow-if-replica-is-stopped** Longhorn will allow the drain when the node contains the last healthy replica of a volume but the replica is stopped. WARNING: possible data loss if the node is removed after draining. Select this option if you want to drain the node and do in-place upgrade/maintenance.
+- **always-allow** Longhorn will allow the drain even though the node contains the last healthy replica of a volume. WARNING: possible data loss if the node is removed after draining. Also possible data corruption if the last replica was running during the draining."
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "block-if-contains-last-replica"
+ - "allow-if-replica-is-stopped"
+ - "always-allow"
+ default: "block-if-contains-last-replica"
+ - variable: defaultSettings.replicaReplenishmentWaitInterval
+ label: Replica Replenishment Wait Interval
+ description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
+Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 600
+ - variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit
+ label: Concurrent Replica Rebuild Per Node Limit
+ description: "This setting controls how many replicas on a node can be rebuilt simultaneously.
+Typically, Longhorn can block the replica starting once the current rebuilding count on a node exceeds the limit. But when the value is 0, it means disabling the replica rebuilding.
+WARNING:
+- The old setting \"Disable Replica Rebuild\" is replaced by this setting.
+- Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped.
+- When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 5
+ - variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit
+ label: Concurrent Volume Backup Restore Per Node Limit
+ description: "This setting controls how many volumes on a node can restore the backup concurrently.
+Longhorn blocks the backup restore once the restoring volume count exceeds the limit.
+Set the value to **0** to disable backup restore."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 5
+ - variable: defaultSettings.disableRevisionCounter
+ label: Disable Revision Counter
+ description: "This setting is only for volumes created by UI. By default, this is false, meaning there will be a revision counter file to track every write to the volume. During salvage recovering Longhorn will pick the replica with the largest revision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+ - variable: defaultSettings.systemManagedPodsImagePullPolicy
+ label: System Managed Pod Image Pull Policy
+ description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart."
+ group: "Longhorn Default Settings"
+ type: enum
+ options:
+ - "if-not-present"
+ - "always"
+ - "never"
+ default: "if-not-present"
+ - variable: defaultSettings.allowVolumeCreationWithDegradedAvailability
+ label: Allow Volume Creation with Degraded Availability
+ description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.autoCleanupSystemGeneratedSnapshot
+ label: Automatically Cleanup System Generated Snapshot
+ description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "true"
+ - variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit
+ label: Concurrent Automatic Engine Upgrade Per Node Limit
+ description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 0
+ - variable: defaultSettings.backingImageCleanupWaitInterval
+ label: Backing Image Cleanup Wait Interval
+ description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 60
+ - variable: defaultSettings.backingImageRecoveryWaitInterval
+ label: Backing Image Recovery Wait Interval
+ description: "This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown.
+ WARNING:
+ - This recovery only works for the backing image of which the creation type is \"download\".
+ - File state \"unknown\" means the related manager pods on the node are not running or the node itself is down/disconnected."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ default: 300
+ - variable: defaultSettings.guaranteedInstanceManagerCPU
+ label: Guaranteed Instance Manager CPU
+ description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each instance manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each instance manager pod on this node. This will help maintain engine and replica stability during high node workload.
+ In order to prevent unexpected volume instance (engine/replica) crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
+ `Guaranteed Instance Manager CPU = The estimated max Longhorn volume engine and replica count on a node * 0.1 / The total allocatable CPUs on the node * 100`
+ The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
+ If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
+ WARNING:
+ - Value 0 means unsetting CPU requests for instance manager pods.
+ - Considering the possible new instance manager pods in future system upgrades, this integer value ranges from 0 to 40.
+ - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
+ - This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set.
+ - After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 0
+ max: 40
+ default: 12
+ - variable: defaultSettings.logLevel
+ label: Log Level
+ description: "The log level Panic, Fatal, Error, Warn, Info, Debug, Trace used in longhorn manager. By default Info."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "Info"
+- variable: defaultSettings.kubernetesClusterAutoscalerEnabled
+ label: Kubernetes Cluster Autoscaler Enabled (Experimental)
+ description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler.
+ Longhorn prevents data loss by only allowing the Cluster Autoscaler to scale down a node that meets all of the following conditions:
+ - No volume attached to the node.
+ - Is not the last node containing the replica of any volume.
+ - Is not running backing image components pod.
+ - Is not running share manager components pod."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: false
+- variable: defaultSettings.orphanAutoDeletion
+ label: Orphaned Data Cleanup
+ description: "This setting allows Longhorn to delete the orphan resource and its corresponding orphaned data automatically like stale replicas. Orphan resources on down or unknown nodes will not be cleaned up automatically."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: false
+- variable: defaultSettings.storageNetwork
+ label: Storage Network
+ description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
+ To segregate the storage network, input the pre-existing NetworkAttachmentDefinition in \"<namespace>/<name>\" format.
+ WARNING:
+ - The cluster must have pre-existing Multus installed, and NetworkAttachmentDefinition IPs are reachable between nodes.
+ - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will try to block this setting update when there are attached volumes.
+ - When applying the setting, Longhorn will restart all manager, instance-manager, and backing-image-manager pods."
+ group: "Longhorn Default Settings"
+ type: string
+ default:
+- variable: defaultSettings.deletingConfirmationFlag
+ label: Deleting Confirmation Flag
+ description: "This flag is designed to prevent Longhorn from being accidentally uninstalled, which would lead to data loss.
+ Set this flag to **true** to allow Longhorn uninstallation.
+ If this flag is **false**, the Longhorn uninstallation job will fail."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+- variable: defaultSettings.engineReplicaTimeout
+ label: Timeout between Engine and Replica
+ description: "In seconds. The setting specifies the timeout between the engine and replica(s), and the value should be between 8 and 30 seconds. The default value is 8 seconds."
+ group: "Longhorn Default Settings"
+ type: int
+ default: "8"
+- variable: defaultSettings.snapshotDataIntegrity
+ label: Snapshot Data Integrity
+ description: "This setting allows users to enable or disable snapshot hashing and data integrity checking.
+ Available options are
+ - **disabled**: Disable snapshot disk file hashing and data integrity checking.
+ - **enabled**: Enables periodic snapshot disk file hashing and data integrity checking. To detect the filesystem-unaware corruption caused by bit rot or other issues in snapshot disk files, Longhorn system periodically hashes files and finds corrupted ones. Hence, the system performance will be impacted during the periodical checking.
+ - **fast-check**: Enable snapshot disk file hashing and fast data integrity checking. Longhorn system only hashes snapshot disk files if they are not hashed or the modification time is changed. In this mode, filesystem-unaware corruption cannot be detected, but the impact on system performance can be minimized."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "disabled"
+- variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation
+ label: Immediate Snapshot Data Integrity Check After Creating a Snapshot
+ description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+- variable: defaultSettings.snapshotDataIntegrityCronjob
+ label: Snapshot Data Integrity Check CronJob
+ description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files.
+ Warning: Hashing snapshot disk files impacts the performance of the system. It is recommended to run data integrity checks during off-peak times and to reduce the frequency of checks."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "0 0 */7 * *"
+- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim
+ label: Remove Snapshots During Filesystem Trim
+ description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children.\n\n
+ Since Longhorn filesystem trim feature can be applied to the volume head and the followed continuous removed or system snapshots only.\n\n
+ Notice that trying to trim a removed file from a valid snapshot will do nothing but the filesystem will discard this kind of in-memory trimmable file info.\n\n
+ Later on if you mark the snapshot as removed and want to retry the trim, you may need to unmount and remount the filesystem so that the filesystem can recollect the trimmable file info."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: "false"
+- variable: defaultSettings.fastReplicaRebuildEnabled
+ label: Fast Replica Rebuild Enabled
+ description: "This feature supports the fast replica rebuilding. It relies on the checksum of snapshot disk files, so setting the snapshot-data-integrity to **enabled** or **fast-check** is a prerequisite."
+ group: "Longhorn Default Settings"
+ type: boolean
+ default: false
+- variable: defaultSettings.replicaFileSyncHttpClientTimeout
+ label: Timeout of HTTP Client to Replica File Sync Server
+ description: "In seconds. The setting specifies the HTTP client timeout to the file sync server."
+ group: "Longhorn Default Settings"
+ type: int
+ default: "30"
+- variable: defaultSettings.backupCompressionMethod
+ label: Backup Compression Method
+ description: "This setting allows users to specify backup compression method.
+ Available options are
+ - **none**: Disable the compression method. Suitable for multimedia data such as encoded images and videos.
+ - **lz4**: Fast compression method. Suitable for flat files.
+ - **gzip**: A bit of higher compression ratio but relatively slow."
+ group: "Longhorn Default Settings"
+ type: string
+ default: "lz4"
+- variable: defaultSettings.backupConcurrentLimit
+ label: Backup Concurrent Limit Per Backup
+ description: "This setting controls how many worker threads run concurrently per backup."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 1
+ default: 2
+- variable: defaultSettings.restoreConcurrentLimit
+ label: Restore Concurrent Limit Per Backup
+ description: "This setting controls how many worker threads run concurrently per restore."
+ group: "Longhorn Default Settings"
+ type: int
+ min: 1
+ default: 2
+- variable: defaultSettings.v2DataEngine
+ label: V2 Data Engine
+ description: "This allows users to activate v2 data engine based on SPDK. Currently, it is in the preview phase and should not be utilized in a production environment.
+ WARNING:
+ - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes.
+ - When applying the setting, Longhorn will restart all instance-manager pods.
+ - When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations."
+ group: "Longhorn V2 Data Engine (Preview Feature) Settings"
+ type: boolean
+ default: false
+- variable: defaultSettings.offlineReplicaRebuilding
+ label: Offline Replica Rebuilding
+ description: "This setting allows users to enable the offline replica rebuilding for volumes using v2 data engine."
+ group: "Longhorn V2 Data Engine (Preview Feature) Settings"
+ required: true
+ type: enum
+ options:
+ - "enabled"
+ - "disabled"
+ default: "enabled"
+- variable: persistence.defaultClass
+ default: "true"
+ description: "Set as default StorageClass for Longhorn"
+ label: Default Storage Class
+ group: "Longhorn Storage Class Settings"
+ required: true
+ type: boolean
+- variable: persistence.reclaimPolicy
+ label: Storage Class Retain Policy
+ description: "Define reclaim policy (Retain or Delete)"
+ group: "Longhorn Storage Class Settings"
+ required: true
+ type: enum
+ options:
+ - "Delete"
+ - "Retain"
+ default: "Delete"
+- variable: persistence.defaultClassReplicaCount
+ description: "Set replica count for Longhorn StorageClass"
+ label: Default Storage Class Replica Count
+ group: "Longhorn Storage Class Settings"
+ type: int
+ min: 1
+ max: 10
+ default: 3
+- variable: persistence.defaultDataLocality
+ description: "Set data locality for Longhorn StorageClass"
+ label: Default Storage Class Data Locality
+ group: "Longhorn Storage Class Settings"
+ type: enum
+ options:
+ - "disabled"
+ - "best-effort"
+ default: "disabled"
+- variable: persistence.recurringJobSelector.enable
+ description: "Enable recurring job selector for Longhorn StorageClass"
+ group: "Longhorn Storage Class Settings"
+ label: Enable Storage Class Recurring Job Selector
+ type: boolean
+ default: false
+ show_subquestion_if: true
+ subquestions:
+ - variable: persistence.recurringJobSelector.jobList
+ description: 'Recurring job selector list for Longhorn StorageClass. Please be careful of quotes of input. e.g., [{"name":"backup", "isGroup":true}]'
+ label: Storage Class Recurring Job Selector List
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+- variable: persistence.defaultNodeSelector.enable
+ description: "Enable Node selector for Longhorn StorageClass"
+ group: "Longhorn Storage Class Settings"
+ label: Enable Storage Class Node Selector
+ type: boolean
+ default: false
+ show_subquestion_if: true
+ subquestions:
+ - variable: persistence.defaultNodeSelector.selector
+ label: Storage Class Node Selector
+ description: 'We use NodeSelector when we want to bind PVC via StorageClass into desired mountpoint on the nodes tagged with its value'
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+- variable: persistence.backingImage.enable
+ description: "Set backing image for Longhorn StorageClass"
+ group: "Longhorn Storage Class Settings"
+ label: Default Storage Class Backing Image
+ type: boolean
+ default: false
+ show_subquestion_if: true
+ subquestions:
+ - variable: persistence.backingImage.name
+ description: 'Specify a backing image that will be used by Longhorn volumes in Longhorn StorageClass. If not exists, the backing image data source type and backing image data source parameters should be specified so that Longhorn will create the backing image before using it.'
+ label: Storage Class Backing Image Name
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+ - variable: persistence.backingImage.expectedChecksum
+ description: 'Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass.
+ WARNING:
+ - If the backing image name is not specified, setting this field is meaningless.
+ - It is not recommended to set this field if the data source type is \"export-from-volume\".'
+ label: Storage Class Backing Image Expected SHA512 Checksum
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+ - variable: persistence.backingImage.dataSourceType
+ description: 'Specify the data source type for the backing image used in Longhorn StorageClass.
+ If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
+ WARNING:
+ - If the backing image name is not specified, setting this field is meaningless.
+ - As for backing image creation with data source type \"upload\", it is recommended to do it via UI rather than StorageClass here. Uploading requires file data sending to the Longhorn backend after the object creation, which is complicated if you want to handle it manually.'
+ label: Storage Class Backing Image Data Source Type
+ group: "Longhorn Storage Class Settings"
+ type: enum
+ options:
+ - ""
+ - "download"
+ - "upload"
+ - "export-from-volume"
+ default: ""
+ - variable: persistence.backingImage.dataSourceParameters
+ description: "Specify the data source parameters for the backing image used in Longhorn StorageClass.
+ If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
+ This option accepts a json string of a map. e.g., '{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'.
+ WARNING:
+ - If the backing image name is not specified, setting this field is meaningless.
+ - Be careful of the quotes here."
+ label: Storage Class Backing Image Data Source Parameters
+ group: "Longhorn Storage Class Settings"
+ type: string
+ default:
+- variable: persistence.removeSnapshotsDuringFilesystemTrim
+ description: "Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass"
+ label: Default Storage Class Remove Snapshots During Filesystem Trim
+ group: "Longhorn Storage Class Settings"
+ type: enum
+ options:
+ - "ignored"
+ - "enabled"
+ - "disabled"
+ default: "ignored"
+- variable: ingress.enabled
+ default: "false"
+ description: "Expose app using Layer 7 Load Balancer - ingress"
+ type: boolean
+ group: "Services and Load Balancing"
+ label: Expose app using Layer 7 Load Balancer
+ show_subquestion_if: true
+ subquestions:
+ - variable: ingress.host
+ default: "xip.io"
+ description: "layer 7 Load Balancer hostname"
+ type: hostname
+ required: true
+ label: Layer 7 Load Balancer Hostname
+ - variable: ingress.path
+ default: "/"
+ description: "If ingress is enabled you can set the default ingress path"
+ type: string
+ required: true
+ label: Ingress Path
+- variable: service.ui.type
+ default: "Rancher-Proxy"
+ description: "Define Longhorn UI service type"
+ type: enum
+ options:
+ - "ClusterIP"
+ - "NodePort"
+ - "LoadBalancer"
+ - "Rancher-Proxy"
+ label: Longhorn UI Service
+ show_if: "ingress.enabled=false"
+ group: "Services and Load Balancing"
+ show_subquestion_if: "NodePort"
+ subquestions:
+ - variable: service.ui.nodePort
+ default: ""
+ description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
+ type: int
+ min: 30000
+ max: 32767
+ show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer"
+ label: UI Service NodePort number
+- variable: enablePSP
+ default: "false"
+ description: "Setup a pod security policy for Longhorn workloads."
+ label: Pod Security Policy
+ type: boolean
+ group: "Other Settings"
+- variable: global.cattle.windowsCluster.enabled
+ default: "false"
+ description: "Enable this to allow Longhorn to run on the Rancher deployed Windows cluster."
+ label: Rancher Windows Cluster
+ type: boolean
+ group: "Other Settings"
+- variable: networkPolicies.enabled
+ description: "Enable NetworkPolicies to limit access to the longhorn pods.
+ Warning: The Rancher Proxy will not work if this feature is enabled and a custom NetworkPolicy must be added."
+ group: "Other Settings"
+ label: Network Policies
+ default: "false"
+ type: boolean
+ subquestions:
+ - variable: networkPolicies.type
+ label: Network Policies for Ingress
+ description: "Create the policy to allow access for the ingress, select the distribution."
+ show_if: "networkPolicies.enabled=true&&ingress.enabled=true"
+ type: enum
+ default: "rke2"
+ options:
+ - "rke1"
+ - "rke2"
+ - "k3s"
diff --git a/charts/longhorn/templates/NOTES.txt b/charts/longhorn/templates/NOTES.txt
new file mode 100644
index 0000000..cca7cd7
--- /dev/null
+++ b/charts/longhorn/templates/NOTES.txt
@@ -0,0 +1,5 @@
+Longhorn is now installed on the cluster!
+
+Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized.
+
+Visit our documentation at https://longhorn.io/docs/
diff --git a/charts/longhorn/templates/_helpers.tpl b/charts/longhorn/templates/_helpers.tpl
new file mode 100644
index 0000000..3fbc2ac
--- /dev/null
+++ b/charts/longhorn/templates/_helpers.tpl
@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "longhorn.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "longhorn.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+
+{{- define "longhorn.managerIP" -}}
+{{- $fullname := (include "longhorn.fullname" .) -}}
+{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+
+{{- define "secret" }}
+{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }}
+{{- end }}
+
+{{- /*
+longhorn.labels generates the standard Helm labels.
+*/ -}}
+{{- define "longhorn.labels" -}}
+app.kubernetes.io/name: {{ template "longhorn.name" . }}
+helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/version: {{ .Chart.AppVersion }}
+{{- end -}}
+
+
+{{- define "system_default_registry" -}}
+{{- if .Values.global.cattle.systemDefaultRegistry -}}
+{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
+{{- else -}}
+{{- "" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "registry_url" -}}
+{{- if .Values.privateRegistry.registryUrl -}}
+{{- printf "%s/" .Values.privateRegistry.registryUrl -}}
+{{- else -}}
+{{ include "system_default_registry" . }}
+{{- end -}}
+{{- end -}}
+
+{{- /*
+ define the longhorn release namespace
+*/ -}}
+{{- define "release_namespace" -}}
+{{- if .Values.namespaceOverride -}}
+{{- .Values.namespaceOverride -}}
+{{- else -}}
+{{- .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/longhorn/templates/clusterrole.yaml b/charts/longhorn/templates/clusterrole.yaml
new file mode 100644
index 0000000..e652a34
--- /dev/null
+++ b/charts/longhorn/templates/clusterrole.yaml
@@ -0,0 +1,61 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: longhorn-role
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - "*"
+- apiGroups: [""]
+ resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list"]
+- apiGroups: ["apps"]
+ resources: ["daemonsets", "statefulsets", "deployments"]
+ verbs: ["*"]
+- apiGroups: ["batch"]
+ resources: ["jobs", "cronjobs"]
+ verbs: ["*"]
+- apiGroups: ["policy"]
+ resources: ["poddisruptionbudgets", "podsecuritypolicies"]
+ verbs: ["*"]
+- apiGroups: ["scheduling.k8s.io"]
+ resources: ["priorityclasses"]
+ verbs: ["watch", "list"]
+- apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"]
+ verbs: ["*"]
+- apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"]
+ verbs: ["*"]
+- apiGroups: ["longhorn.io"]
+ resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
+ "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status",
+ "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status",
+ "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status",
+ "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status",
+ "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status",
+ "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status",
+ "volumeattachments", "volumeattachments/status"]
+ verbs: ["*"]
+- apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["*"]
+- apiGroups: ["metrics.k8s.io"]
+ resources: ["pods", "nodes"]
+ verbs: ["get", "list"]
+- apiGroups: ["apiregistration.k8s.io"]
+ resources: ["apiservices"]
+ verbs: ["list", "watch"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
+ verbs: ["get", "list", "create", "patch", "delete"]
+- apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"]
+ verbs: ["*"]
diff --git a/charts/longhorn/templates/clusterrolebinding.yaml b/charts/longhorn/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000..8ab944b
--- /dev/null
+++ b/charts/longhorn/templates/clusterrolebinding.yaml
@@ -0,0 +1,27 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: longhorn-bind
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: longhorn-role
+subjects:
+- kind: ServiceAccount
+ name: longhorn-service-account
+ namespace: {{ include "release_namespace" . }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: longhorn-support-bundle
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+- kind: ServiceAccount
+ name: longhorn-support-bundle
+ namespace: {{ include "release_namespace" . }}
diff --git a/charts/longhorn/templates/crds.yaml b/charts/longhorn/templates/crds.yaml
new file mode 100644
index 0000000..ac56efe
--- /dev/null
+++ b/charts/longhorn/templates/crds.yaml
@@ -0,0 +1,3672 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backingimagedatasources.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: BackingImageDataSource
+ listKind: BackingImageDataSourceList
+ plural: backingimagedatasources
+ shortNames:
+ - lhbids
+ singular: backingimagedatasource
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the pod used to provision the backing image file from source
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The data source type
+ jsonPath: .spec.sourceType
+ name: SourceType
+ type: string
+ - description: The node the backing image file will be prepared on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the backing image file will be prepared on
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackingImageDataSource is where Longhorn stores backing image data source object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The system generated UUID of the provisioned backing image file
+ jsonPath: .spec.uuid
+ name: UUID
+ type: string
+ - description: The current state of the pod used to provision the backing image file from source
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The data source type
+ jsonPath: .spec.sourceType
+ name: SourceType
+ type: string
+ - description: The backing image file size
+ jsonPath: .status.size
+ name: Size
+ type: string
+ - description: The node the backing image file will be prepared on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the backing image file will be prepared on
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackingImageDataSource is where Longhorn stores backing image data source object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackingImageDataSourceSpec defines the desired state of the Longhorn backing image data source
+ properties:
+ checksum:
+ type: string
+ diskPath:
+ type: string
+ diskUUID:
+ type: string
+ fileTransferred:
+ type: boolean
+ nodeID:
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ type: object
+ sourceType:
+ enum:
+ - download
+ - upload
+ - export-from-volume
+ type: string
+ uuid:
+ type: string
+ type: object
+ status:
+ description: BackingImageDataSourceStatus defines the observed state of the Longhorn backing image data source
+ properties:
+ checksum:
+ type: string
+ currentState:
+ type: string
+ ip:
+ type: string
+ message:
+ type: string
+ ownerID:
+ type: string
+ progress:
+ type: integer
+ runningParameters:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ size:
+ format: int64
+ type: integer
+ storageIP:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backingimagemanagers.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: BackingImageManager
+ listKind: BackingImageManagerList
+ plural: backingimagemanagers
+ shortNames:
+ - lhbim
+ singular: backingimagemanager
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The image the manager pod will use
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: The node the manager is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the manager is responsible for
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - description: The disk path the manager is using
+ jsonPath: .spec.diskPath
+ name: DiskPath
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackingImageManager is where Longhorn stores backing image manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The current state of the manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The image the manager pod will use
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: The node the manager is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk the manager is responsible for
+ jsonPath: .spec.diskUUID
+ name: DiskUUID
+ type: string
+ - description: The disk path the manager is using
+ jsonPath: .spec.diskPath
+ name: DiskPath
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackingImageManager is where Longhorn stores backing image manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackingImageManagerSpec defines the desired state of the Longhorn backing image manager
+ properties:
+ backingImages:
+ additionalProperties:
+ type: string
+ type: object
+ diskPath:
+ type: string
+ diskUUID:
+ type: string
+ image:
+ type: string
+ nodeID:
+ type: string
+ type: object
+ status:
+ description: BackingImageManagerStatus defines the observed state of the Longhorn backing image manager
+ properties:
+ apiMinVersion:
+ type: integer
+ apiVersion:
+ type: integer
+ backingImageFileMap:
+ additionalProperties:
+ properties:
+ currentChecksum:
+ type: string
+ message:
+ type: string
+ name:
+ type: string
+ progress:
+ type: integer
+ senderManagerAddress:
+ type: string
+ sendingReference:
+ type: integer
+ size:
+ format: int64
+ type: integer
+ state:
+ type: string
+ uuid:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ currentState:
+ type: string
+ ip:
+ type: string
+ ownerID:
+ type: string
+ storageIP:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backingimages.longhorn.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9501
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: BackingImage
+ listKind: BackingImageList
+ plural: backingimages
+ shortNames:
+ - lhbi
+ singular: backingimage
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The backing image name
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackingImage is where Longhorn stores backing image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The system generated UUID
+ jsonPath: .status.uuid
+ name: UUID
+ type: string
+ - description: The source of the backing image file data
+ jsonPath: .spec.sourceType
+ name: SourceType
+ type: string
+ - description: The backing image file size in each disk
+ jsonPath: .status.size
+ name: Size
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackingImage is where Longhorn stores backing image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackingImageSpec defines the desired state of the Longhorn backing image
+ properties:
+ checksum:
+ type: string
+ disks:
+ additionalProperties:
+ type: string
+ type: object
+ sourceParameters:
+ additionalProperties:
+ type: string
+ type: object
+ sourceType:
+ enum:
+ - download
+ - upload
+ - export-from-volume
+ type: string
+ type: object
+ status:
+ description: BackingImageStatus defines the observed state of the Longhorn backing image status
+ properties:
+ checksum:
+ type: string
+ diskFileStatusMap:
+ additionalProperties:
+ properties:
+ lastStateTransitionTime:
+ type: string
+ message:
+ type: string
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ diskLastRefAtMap:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ ownerID:
+ type: string
+ size:
+ format: int64
+ type: integer
+ uuid:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backups.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Backup
+ listKind: BackupList
+ plural: backups
+ shortNames:
+ - lhb
+ singular: backup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The snapshot name
+ jsonPath: .status.snapshotName
+ name: SnapshotName
+ type: string
+ - description: The snapshot size
+ jsonPath: .status.size
+ name: SnapshotSize
+ type: string
+ - description: The snapshot creation time
+ jsonPath: .status.snapshotCreatedAt
+ name: SnapshotCreatedAt
+ type: string
+ - description: The backup state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The backup last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Backup is where Longhorn stores backup object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The snapshot name
+ jsonPath: .status.snapshotName
+ name: SnapshotName
+ type: string
+ - description: The snapshot size
+ jsonPath: .status.size
+ name: SnapshotSize
+ type: string
+ - description: The snapshot creation time
+ jsonPath: .status.snapshotCreatedAt
+ name: SnapshotCreatedAt
+ type: string
+ - description: The backup state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The backup last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Backup is where Longhorn stores backup object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackupSpec defines the desired state of the Longhorn backup
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels of snapshot backup.
+ type: object
+ snapshotName:
+ description: The snapshot name.
+ type: string
+ syncRequestedAt:
+ description: The time to request run sync the remote backup.
+ format: date-time
+ nullable: true
+ type: string
+ type: object
+ status:
+ description: BackupStatus defines the observed state of the Longhorn backup
+ properties:
+ backupCreatedAt:
+ description: The snapshot backup upload finished time.
+ type: string
+ compressionMethod:
+ description: Compression method
+ type: string
+ error:
+ description: The error message when taking the snapshot backup.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels of snapshot backup.
+ nullable: true
+ type: object
+ lastSyncedAt:
+ description: The last time that the backup was synced with the remote backup target.
+ format: date-time
+ nullable: true
+ type: string
+ messages:
+ additionalProperties:
+ type: string
+ description: The error messages when calling longhorn engine on listing or inspecting backups.
+ nullable: true
+ type: object
+ ownerID:
+ description: The node ID on which the controller is responsible to reconcile this backup CR.
+ type: string
+ progress:
+ description: The snapshot backup progress.
+ type: integer
+ replicaAddress:
+ description: The address of the replica that runs snapshot backup.
+ type: string
+ size:
+ description: The snapshot size.
+ type: string
+ snapshotCreatedAt:
+ description: The snapshot creation time.
+ type: string
+ snapshotName:
+ description: The snapshot name.
+ type: string
+ state:
+ description: The backup creation state. Can be "", "InProgress", "Completed", "Error", "Unknown".
+ type: string
+ url:
+ description: The snapshot backup URL.
+ type: string
+ volumeBackingImageName:
+ description: The volume's backing image name.
+ type: string
+ volumeCreated:
+ description: The volume creation time.
+ type: string
+ volumeName:
+ description: The volume name.
+ type: string
+ volumeSize:
+ description: The volume size.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backuptargets.longhorn.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9501
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: BackupTarget
+ listKind: BackupTargetList
+ plural: backuptargets
+ shortNames:
+ - lhbt
+ singular: backuptarget
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The backup target URL
+ jsonPath: .spec.backupTargetURL
+ name: URL
+ type: string
+ - description: The backup target credential secret
+ jsonPath: .spec.credentialSecret
+ name: Credential
+ type: string
+ - description: The backup target poll interval
+ jsonPath: .spec.pollInterval
+ name: PollInterval
+ type: string
+ - description: Indicate whether the backup target is available or not
+ jsonPath: .status.available
+ name: Available
+ type: boolean
+ - description: The backup target last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackupTarget is where Longhorn stores backup target object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The backup target URL
+ jsonPath: .spec.backupTargetURL
+ name: URL
+ type: string
+ - description: The backup target credential secret
+ jsonPath: .spec.credentialSecret
+ name: Credential
+ type: string
+ - description: The backup target poll interval
+ jsonPath: .spec.pollInterval
+ name: PollInterval
+ type: string
+ - description: Indicate whether the backup target is available or not
+ jsonPath: .status.available
+ name: Available
+ type: boolean
+ - description: The backup target last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackupTarget is where Longhorn stores backup target object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackupTargetSpec defines the desired state of the Longhorn backup target
+ properties:
+ backupTargetURL:
+ description: The backup target URL.
+ type: string
+ credentialSecret:
+ description: The backup target credential secret.
+ type: string
+ pollInterval:
+ description: The interval that the cluster needs to run sync with the backup target.
+ type: string
+ syncRequestedAt:
+ description: The time to request run sync the remote backup target.
+ format: date-time
+ nullable: true
+ type: string
+ type: object
+ status:
+ description: BackupTargetStatus defines the observed state of the Longhorn backup target
+ properties:
+ available:
+ description: Available indicates if the remote backup target is available or not.
+ type: boolean
+ conditions:
+ description: Records the reason on why the backup target is unavailable.
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ lastSyncedAt:
+ description: The last time that the controller synced with the remote backup target.
+ format: date-time
+ nullable: true
+ type: string
+ ownerID:
+ description: The node ID on which the controller is responsible to reconcile this backup target CR.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: backupvolumes.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: BackupVolume
+ listKind: BackupVolumeList
+ plural: backupvolumes
+ shortNames:
+ - lhbv
+ singular: backupvolume
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The backup volume creation time
+ jsonPath: .status.createdAt
+ name: CreatedAt
+ type: string
+ - description: The backup volume last backup name
+ jsonPath: .status.lastBackupName
+ name: LastBackupName
+ type: string
+ - description: The backup volume last backup time
+ jsonPath: .status.lastBackupAt
+ name: LastBackupAt
+ type: string
+ - description: The backup volume last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BackupVolume is where Longhorn stores backup volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The backup volume creation time
+ jsonPath: .status.createdAt
+ name: CreatedAt
+ type: string
+ - description: The backup volume last backup name
+ jsonPath: .status.lastBackupName
+ name: LastBackupName
+ type: string
+ - description: The backup volume last backup time
+ jsonPath: .status.lastBackupAt
+ name: LastBackupAt
+ type: string
+ - description: The backup volume last synced time
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BackupVolume is where Longhorn stores backup volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BackupVolumeSpec defines the desired state of the Longhorn backup volume
+ properties:
+ syncRequestedAt:
+ description: The time to request run sync the remote backup volume.
+ format: date-time
+ nullable: true
+ type: string
+ type: object
+ status:
+ description: BackupVolumeStatus defines the observed state of the Longhorn backup volume
+ properties:
+ backingImageChecksum:
+ description: The backing image checksum.
+ type: string
+ backingImageName:
+ description: The backing image name.
+ type: string
+ createdAt:
+ description: The backup volume creation time.
+ type: string
+ dataStored:
+ description: The backup volume block count.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: The backup volume labels.
+ nullable: true
+ type: object
+ lastBackupAt:
+ description: The latest volume backup time.
+ type: string
+ lastBackupName:
+ description: The latest volume backup name.
+ type: string
+ lastModificationTime:
+ description: The backup volume config last modification time.
+ format: date-time
+ nullable: true
+ type: string
+ lastSyncedAt:
+ description: The last time that the backup volume was synced into the cluster.
+ format: date-time
+ nullable: true
+ type: string
+ messages:
+ additionalProperties:
+ type: string
+ description: The error messages when calling longhorn engine on listing or inspecting backup volumes.
+ nullable: true
+ type: object
+ ownerID:
+ description: The node ID on which the controller is responsible to reconcile this backup volume CR.
+ type: string
+ size:
+ description: The backup volume size.
+ type: string
+ storageClassName:
+ description: The storage class name of the PV/PVC bound to the volume.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: engineimages.longhorn.io
+spec:
+ preserveUnknownFields: false
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9501
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: EngineImage
+ listKind: EngineImageList
+ plural: engineimages
+ shortNames:
+ - lhei
+ singular: engineimage
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: State of the engine image
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The Longhorn engine image
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: Number of resources using the engine image
+ jsonPath: .status.refCount
+ name: RefCount
+ type: integer
+ - description: The build date of the engine image
+ jsonPath: .status.buildDate
+ name: BuildDate
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: EngineImage is where Longhorn stores engine image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: State of the engine image
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The Longhorn engine image
+ jsonPath: .spec.image
+ name: Image
+ type: string
+ - description: Number of resources using the engine image
+ jsonPath: .status.refCount
+ name: RefCount
+ type: integer
+ - description: The build date of the engine image
+ jsonPath: .status.buildDate
+ name: BuildDate
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: EngineImage is where Longhorn stores engine image object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EngineImageSpec defines the desired state of the Longhorn engine image
+ properties:
+ image:
+ minLength: 1
+ type: string
+ required:
+ - image
+ type: object
+ status:
+ description: EngineImageStatus defines the observed state of the Longhorn engine image
+ properties:
+ buildDate:
+ type: string
+ cliAPIMinVersion:
+ type: integer
+ cliAPIVersion:
+ type: integer
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ controllerAPIMinVersion:
+ type: integer
+ controllerAPIVersion:
+ type: integer
+ dataFormatMinVersion:
+ type: integer
+ dataFormatVersion:
+ type: integer
+ gitCommit:
+ type: string
+ noRefSince:
+ type: string
+ nodeDeploymentMap:
+ additionalProperties:
+ type: boolean
+ nullable: true
+ type: object
+ ownerID:
+ type: string
+ refCount:
+ type: integer
+ state:
+ type: string
+ version:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: engines.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Engine
+ listKind: EngineList
+ plural: engines
+ shortNames:
+ - lhe
+ singular: engine
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the engine
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the engine is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The instance manager of the engine
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the engine
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Engine is where Longhorn stores engine object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The current state of the engine
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the engine is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The instance manager of the engine
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the engine
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Engine is where Longhorn stores engine object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EngineSpec defines the desired state of the Longhorn engine
+ properties:
+ active:
+ type: boolean
+ backendStoreDriver:
+ enum:
+ - v1
+ - v2
+ type: string
+ backupVolume:
+ type: string
+ desireState:
+ type: string
+ disableFrontend:
+ type: boolean
+ engineImage:
+ type: string
+ frontend:
+ enum:
+ - blockdev
+ - iscsi
+ - nvmf
+ - ""
+ type: string
+ logRequested:
+ type: boolean
+ nodeID:
+ type: string
+ replicaAddressMap:
+ additionalProperties:
+ type: string
+ type: object
+ requestedBackupRestore:
+ type: string
+ requestedDataSource:
+ type: string
+ revisionCounterDisabled:
+ type: boolean
+ salvageRequested:
+ type: boolean
+ unmapMarkSnapChainRemovedEnabled:
+ type: boolean
+ upgradedReplicaAddressMap:
+ additionalProperties:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeSize:
+ format: int64
+ type: string
+ type: object
+ status:
+ description: EngineStatus defines the observed state of the Longhorn engine
+ properties:
+ backupStatus:
+ additionalProperties:
+ properties:
+ backupURL:
+ type: string
+ error:
+ type: string
+ progress:
+ type: integer
+ replicaAddress:
+ type: string
+ snapshotName:
+ type: string
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ cloneStatus:
+ additionalProperties:
+ properties:
+ error:
+ type: string
+ fromReplicaAddress:
+ type: string
+ isCloning:
+ type: boolean
+ progress:
+ type: integer
+ snapshotName:
+ type: string
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ currentImage:
+ type: string
+ currentReplicaAddressMap:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ currentSize:
+ format: int64
+ type: string
+ currentState:
+ type: string
+ endpoint:
+ type: string
+ instanceManagerName:
+ type: string
+ ip:
+ type: string
+ isExpanding:
+ type: boolean
+ lastExpansionError:
+ type: string
+ lastExpansionFailedAt:
+ type: string
+ lastRestoredBackup:
+ type: string
+ logFetched:
+ type: boolean
+ ownerID:
+ type: string
+ port:
+ type: integer
+ purgeStatus:
+ additionalProperties:
+ properties:
+ error:
+ type: string
+ isPurging:
+ type: boolean
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ rebuildStatus:
+ additionalProperties:
+ properties:
+ error:
+ type: string
+ fromReplicaAddress:
+ type: string
+ isRebuilding:
+ type: boolean
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ replicaModeMap:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ restoreStatus:
+ additionalProperties:
+ properties:
+ backupURL:
+ type: string
+ currentRestoringBackup:
+ type: string
+ error:
+ type: string
+ filename:
+ type: string
+ isRestoring:
+ type: boolean
+ lastRestored:
+ type: string
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ nullable: true
+ type: object
+ salvageExecuted:
+ type: boolean
+ snapshots:
+ additionalProperties:
+ properties:
+ children:
+ additionalProperties:
+ type: boolean
+ nullable: true
+ type: object
+ created:
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ name:
+ type: string
+ parent:
+ type: string
+ removed:
+ type: boolean
+ size:
+ type: string
+ usercreated:
+ type: boolean
+ type: object
+ nullable: true
+ type: object
+ snapshotsError:
+ type: string
+ started:
+ type: boolean
+ storageIP:
+ type: string
+ unmapMarkSnapChainRemovedEnabled:
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: instancemanagers.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: InstanceManager
+ listKind: InstanceManagerList
+ plural: instancemanagers
+ shortNames:
+ - lhim
+ singular: instancemanager
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the instance manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The type of the instance manager (engine or replica)
+ jsonPath: .spec.type
+ name: Type
+ type: string
+ - description: The node that the instance manager is running on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: InstanceManager is where Longhorn stores instance manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The state of the instance manager
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The type of the instance manager (engine or replica)
+ jsonPath: .spec.type
+ name: Type
+ type: string
+ - description: The node that the instance manager is running on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: InstanceManager is where Longhorn stores instance manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: InstanceManagerSpec defines the desired state of the Longhorn instance manager
+ properties:
+ image:
+ type: string
+ nodeID:
+ type: string
+ type:
+ enum:
+ - aio
+ - engine
+ - replica
+ type: string
+ type: object
+ status:
+ description: InstanceManagerStatus defines the observed state of the Longhorn instance manager
+ properties:
+ apiMinVersion:
+ type: integer
+ apiVersion:
+ type: integer
+ proxyApiMinVersion:
+ type: integer
+ proxyApiVersion:
+ type: integer
+ currentState:
+ type: string
+ instanceEngines:
+ additionalProperties:
+ properties:
+ spec:
+ properties:
+ backendStoreDriver:
+ type: string
+ name:
+ type: string
+ type: object
+ status:
+ properties:
+ endpoint:
+ type: string
+ errorMsg:
+ type: string
+ listen:
+ type: string
+ portEnd:
+ format: int32
+ type: integer
+ portStart:
+ format: int32
+ type: integer
+ resourceVersion:
+ format: int64
+ type: integer
+ state:
+ type: string
+ type:
+ type: string
+ type: object
+ type: object
+ nullable: true
+ type: object
+ instanceReplicas:
+ additionalProperties:
+ properties:
+ spec:
+ properties:
+ backendStoreDriver:
+ type: string
+ name:
+ type: string
+ type: object
+ status:
+ properties:
+ endpoint:
+ type: string
+ errorMsg:
+ type: string
+ listen:
+ type: string
+ portEnd:
+ format: int32
+ type: integer
+ portStart:
+ format: int32
+ type: integer
+ resourceVersion:
+ format: int64
+ type: integer
+ state:
+ type: string
+ type:
+ type: string
+ type: object
+ type: object
+ nullable: true
+ type: object
+ instances:
+ additionalProperties:
+ properties:
+ spec:
+ properties:
+ backendStoreDriver:
+ type: string
+ name:
+ type: string
+ type: object
+ status:
+ properties:
+ endpoint:
+ type: string
+ errorMsg:
+ type: string
+ listen:
+ type: string
+ portEnd:
+ format: int32
+ type: integer
+ portStart:
+ format: int32
+ type: integer
+ resourceVersion:
+ format: int64
+ type: integer
+ state:
+ type: string
+ type:
+ type: string
+ type: object
+ type: object
+ nullable: true
+ description: 'Deprecated: Replaced by InstanceEngines and InstanceReplicas'
+ type: object
+ ip:
+ type: string
+ ownerID:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: nodes.longhorn.io
+spec:
+ preserveUnknownFields: false
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9501
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: Node
+ listKind: NodeList
+ plural: nodes
+ shortNames:
+ - lhn
+ singular: node
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicate whether the node is ready
+ jsonPath: .status.conditions['Ready']['status']
+ name: Ready
+ type: string
+ - description: Indicate whether the user disabled/enabled replica scheduling for the node
+ jsonPath: .spec.allowScheduling
+ name: AllowScheduling
+ type: boolean
+ - description: Indicate whether Longhorn can schedule replicas on the node
+ jsonPath: .status.conditions['Schedulable']['status']
+ name: Schedulable
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Node is where Longhorn stores Longhorn node object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Indicate whether the node is ready
+ jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - description: Indicate whether the user disabled/enabled replica scheduling for the node
+ jsonPath: .spec.allowScheduling
+ name: AllowScheduling
+ type: boolean
+ - description: Indicate whether Longhorn can schedule replicas on the node
+ jsonPath: .status.conditions[?(@.type=='Schedulable')].status
+ name: Schedulable
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Node is where Longhorn stores Longhorn node object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NodeSpec defines the desired state of the Longhorn node
+ properties:
+ allowScheduling:
+ type: boolean
+ disks:
+ additionalProperties:
+ properties:
+ allowScheduling:
+ type: boolean
+ evictionRequested:
+ type: boolean
+ path:
+ type: string
+ storageReserved:
+ format: int64
+ type: integer
+ tags:
+ items:
+ type: string
+ type: array
+ diskType:
+ enum:
+ - filesystem
+ - block
+ type: string
+ type: object
+ type: object
+ evictionRequested:
+ type: boolean
+ instanceManagerCPURequest:
+ type: integer
+ name:
+ type: string
+ tags:
+ items:
+ type: string
+ type: array
+ type: object
+ status:
+ description: NodeStatus defines the observed state of the Longhorn node
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ diskStatus:
+ additionalProperties:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ diskType:
+ type: string
+ diskUUID:
+ type: string
+ scheduledReplica:
+ additionalProperties:
+ format: int64
+ type: integer
+ nullable: true
+ type: object
+ storageAvailable:
+ format: int64
+ type: integer
+ storageMaximum:
+ format: int64
+ type: integer
+ storageScheduled:
+ format: int64
+ type: integer
+ type: object
+ nullable: true
+ type: object
+ region:
+ type: string
+ snapshotCheckStatus:
+ properties:
+ lastPeriodicCheckedAt:
+ format: date-time
+ type: string
+ snapshotCheckState:
+ type: string
+ type: object
+ zone:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: orphans.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Orphan
+ listKind: OrphanList
+ plural: orphans
+ shortNames:
+ - lho
+ singular: orphan
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The type of the orphan
+ jsonPath: .spec.orphanType
+ name: Type
+ type: string
+ - description: The node that the orphan is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Orphan is where Longhorn stores orphan object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OrphanSpec defines the desired state of the Longhorn orphaned data
+ properties:
+ nodeID:
+ description: The node ID on which the controller is responsible to reconcile this orphan CR.
+ type: string
+ orphanType:
+ description: The type of the orphaned data. Can be "replica".
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: The parameters of the orphaned data
+ type: object
+ type: object
+ status:
+ description: OrphanStatus defines the observed state of the Longhorn orphaned data
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ ownerID:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: recurringjobs.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: RecurringJob
+ listKind: RecurringJobList
+ plural: recurringjobs
+ shortNames:
+ - lhrj
+ singular: recurringjob
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Sets groupings to the jobs. When set to "default" group will be added to the volume label when no other job label exist in volume
+ jsonPath: .spec.groups
+ name: Groups
+ type: string
+ - description: Should be one of "backup" or "snapshot"
+ jsonPath: .spec.task
+ name: Task
+ type: string
+ - description: The cron expression represents recurring job scheduling
+ jsonPath: .spec.cron
+ name: Cron
+ type: string
+ - description: The number of snapshots/backups to keep for the volume
+ jsonPath: .spec.retain
+ name: Retain
+ type: integer
+ - description: The concurrent job to run by each cron job
+ jsonPath: .spec.concurrency
+ name: Concurrency
+ type: integer
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Specify the labels
+ jsonPath: .spec.labels
+ name: Labels
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: RecurringJob is where Longhorn stores recurring job object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Sets groupings to the jobs. When set to "default" group will be added to the volume label when no other job label exist in volume
+ jsonPath: .spec.groups
+ name: Groups
+ type: string
+ - description: Should be one of "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup", "backup-force-create" or "filesystem-trim"
+ jsonPath: .spec.task
+ name: Task
+ type: string
+ - description: The cron expression represents recurring job scheduling
+ jsonPath: .spec.cron
+ name: Cron
+ type: string
+ - description: The number of snapshots/backups to keep for the volume
+ jsonPath: .spec.retain
+ name: Retain
+ type: integer
+ - description: The concurrent job to run by each cron job
+ jsonPath: .spec.concurrency
+ name: Concurrency
+ type: integer
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Specify the labels
+ jsonPath: .spec.labels
+ name: Labels
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: RecurringJob is where Longhorn stores recurring job object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: RecurringJobSpec defines the desired state of the Longhorn recurring job
+ properties:
+ concurrency:
+ description: The concurrency of taking the snapshot/backup.
+ type: integer
+ cron:
+ description: The cron setting.
+ type: string
+ groups:
+ description: The recurring job group.
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ description: The label of the snapshot/backup.
+ type: object
+ name:
+ description: The recurring job name.
+ type: string
+ retain:
+ description: The retain count of the snapshot/backup.
+ type: integer
+ task:
+ description: The recurring job task. Can be "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup", "backup-force-create" or "filesystem-trim"
+ enum:
+ - snapshot
+ - snapshot-force-create
+ - snapshot-cleanup
+ - snapshot-delete
+ - backup
+ - backup-force-create
+ - filesystem-trim
+ type: string
+ type: object
+ status:
+ description: RecurringJobStatus defines the observed state of the Longhorn recurring job
+ properties:
+ ownerID:
+ description: The owner ID which is responsible to reconcile this recurring job CR.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: replicas.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Replica
+ listKind: ReplicaList
+ plural: replicas
+ shortNames:
+ - lhr
+ singular: replica
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The current state of the replica
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the replica is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk that the replica is on
+ jsonPath: .spec.diskID
+ name: Disk
+ type: string
+ - description: The instance manager of the replica
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the replica
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Replica is where Longhorn stores replica object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The current state of the replica
+ jsonPath: .status.currentState
+ name: State
+ type: string
+ - description: The node that the replica is on
+ jsonPath: .spec.nodeID
+ name: Node
+ type: string
+ - description: The disk that the replica is on
+ jsonPath: .spec.diskID
+ name: Disk
+ type: string
+ - description: The instance manager of the replica
+ jsonPath: .status.instanceManagerName
+ name: InstanceManager
+ type: string
+ - description: The current image of the replica
+ jsonPath: .status.currentImage
+ name: Image
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Replica is where Longhorn stores replica object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ReplicaSpec defines the desired state of the Longhorn replica
+ properties:
+ active:
+ type: boolean
+ backendStoreDriver:
+ enum:
+ - v1
+ - v2
+ type: string
+ backingImage:
+ type: string
+ dataDirectoryName:
+ type: string
+ desireState:
+ type: string
+ diskID:
+ type: string
+ diskPath:
+ type: string
+ engineImage:
+ type: string
+ engineName:
+ type: string
+ failedAt:
+ type: string
+ hardNodeAffinity:
+ type: string
+ healthyAt:
+ type: string
+ logRequested:
+ type: boolean
+ nodeID:
+ type: string
+ rebuildRetryCount:
+ type: integer
+ revisionCounterDisabled:
+ type: boolean
+ salvageRequested:
+ type: boolean
+ unmapMarkDiskChainRemovedEnabled:
+ type: boolean
+ volumeName:
+ type: string
+ volumeSize:
+ format: int64
+ type: string
+ type: object
+ status:
+ description: ReplicaStatus defines the observed state of the Longhorn replica
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ currentImage:
+ type: string
+ currentState:
+ type: string
+ evictionRequested:
+ type: boolean
+ instanceManagerName:
+ type: string
+ ip:
+ type: string
+ logFetched:
+ type: boolean
+ ownerID:
+ type: string
+ port:
+ type: integer
+ salvageExecuted:
+ type: boolean
+ started:
+ type: boolean
+ storageIP:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: settings.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Setting
+ listKind: SettingList
+ plural: settings
+ shortNames:
+ - lhs
+ singular: setting
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The value of the setting
+ jsonPath: .value
+ name: Value
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Setting is where Longhorn stores setting object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ value:
+ type: string
+ required:
+ - value
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The value of the setting
+ jsonPath: .value
+ name: Value
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Setting is where Longhorn stores setting object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ value:
+ type: string
+ required:
+ - value
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: sharemanagers.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: ShareManager
+ listKind: ShareManagerList
+ plural: sharemanagers
+ shortNames:
+ - lhsm
+ singular: sharemanager
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the share manager
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The node that the share manager is owned by
+ jsonPath: .status.ownerID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: ShareManager is where Longhorn stores share manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The state of the share manager
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The node that the share manager is owned by
+ jsonPath: .status.ownerID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ShareManager is where Longhorn stores share manager object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ShareManagerSpec defines the desired state of the Longhorn share manager
+ properties:
+ image:
+ description: Share manager image used for creating a share manager pod
+ type: string
+ type: object
+ status:
+ description: ShareManagerStatus defines the observed state of the Longhorn share manager
+ properties:
+ endpoint:
+ description: NFS endpoint that can access the mounted filesystem of the volume
+ type: string
+ ownerID:
+ description: The node ID on which the controller is responsible to reconcile this share manager resource
+ type: string
+ state:
+ description: The state of the share manager resource
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: snapshots.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: Snapshot
+ listKind: SnapshotList
+ plural: snapshots
+ shortNames:
+ - lhsnap
+ singular: snapshot
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The volume that this snapshot belongs to
+ jsonPath: .spec.volume
+ name: Volume
+ type: string
+ - description: Timestamp when the point-in-time snapshot was taken
+ jsonPath: .status.creationTime
+ name: CreationTime
+ type: string
+ - description: Indicates if the snapshot is ready to be used to restore/backup a volume
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Represents the minimum size of volume required to rehydrate from this snapshot
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: string
+ - description: The actual size of the snapshot
+ jsonPath: .status.size
+ name: Size
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Snapshot is the Schema for the snapshots API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SnapshotSpec defines the desired state of Longhorn Snapshot
+ properties:
+ createSnapshot:
+ description: require creating a new snapshot
+ type: boolean
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels of snapshot
+ nullable: true
+ type: object
+ volume:
+ description: the volume that this snapshot belongs to. This field is immutable after creation. Required
+ type: string
+ required:
+ - volume
+ type: object
+ status:
+ description: SnapshotStatus defines the observed state of Longhorn Snapshot
+ properties:
+ checksum:
+ type: string
+ children:
+ additionalProperties:
+ type: boolean
+ nullable: true
+ type: object
+ creationTime:
+ type: string
+ error:
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ markRemoved:
+ type: boolean
+ ownerID:
+ type: string
+ parent:
+ type: string
+ readyToUse:
+ type: boolean
+ restoreSize:
+ format: int64
+ type: integer
+ size:
+ format: int64
+ type: integer
+ userCreated:
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: supportbundles.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: SupportBundle
+ listKind: SupportBundleList
+ plural: supportbundles
+ shortNames:
+ - lhbundle
+ singular: supportbundle
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the support bundle
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The issue URL
+ jsonPath: .spec.issueURL
+ name: Issue
+ type: string
+ - description: A brief description of the issue
+ jsonPath: .spec.description
+ name: Description
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: SupportBundle is where Longhorn stores support bundle object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SupportBundleSpec defines the desired state of the Longhorn SupportBundle
+ properties:
+ description:
+ description: A brief description of the issue
+ type: string
+ issueURL:
+ description: The issue URL
+ nullable: true
+ type: string
+ nodeID:
+ description: The preferred responsible controller node ID.
+ type: string
+ required:
+ - description
+ type: object
+ status:
+ description: SupportBundleStatus defines the observed state of the Longhorn SupportBundle
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ type: array
+ filename:
+ type: string
+ filesize:
+ format: int64
+ type: integer
+ image:
+ description: The support bundle manager image
+ type: string
+ managerIP:
+ description: The support bundle manager IP
+ type: string
+ ownerID:
+ description: The current responsible controller node ID
+ type: string
+ progress:
+ type: integer
+ state:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: systembackups.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: SystemBackup
+ listKind: SystemBackupList
+ plural: systembackups
+ shortNames:
+ - lhsb
+ singular: systembackup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The system backup Longhorn version
+ jsonPath: .status.version
+ name: Version
+ type: string
+ - description: The system backup state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The system backup creation time
+ jsonPath: .status.createdAt
+ name: Created
+ type: string
+ - description: The last time that the system backup was synced into the cluster
+ jsonPath: .status.lastSyncedAt
+ name: LastSyncedAt
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: SystemBackup is where Longhorn stores system backup object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SystemBackupSpec defines the desired state of the Longhorn SystemBackup
+ properties:
+ volumeBackupPolicy:
+ description: The create volume backup policy. Can be "if-not-present", "always" or "disabled"
+ nullable: true
+ type: string
+ type: object
+ status:
+ description: SystemBackupStatus defines the observed state of the Longhorn SystemBackup
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ createdAt:
+ description: The system backup creation time.
+ format: date-time
+ type: string
+ gitCommit:
+ description: The saved Longhorn manager git commit.
+ nullable: true
+ type: string
+ lastSyncedAt:
+ description: The last time that the system backup was synced into the cluster.
+ format: date-time
+ nullable: true
+ type: string
+ managerImage:
+ description: The saved manager image.
+ type: string
+ ownerID:
+ description: The node ID of the responsible controller to reconcile this SystemBackup.
+ type: string
+ state:
+ description: The system backup state.
+ type: string
+ version:
+ description: The saved Longhorn version.
+ nullable: true
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: systemrestores.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: SystemRestore
+ listKind: SystemRestoreList
+ plural: systemrestores
+ shortNames:
+ - lhsr
+ singular: systemrestore
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The system restore state
+ jsonPath: .status.state
+ name: State
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: SystemRestore is where Longhorn stores system restore object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SystemRestoreSpec defines the desired state of the Longhorn SystemRestore
+ properties:
+ systemBackup:
+ description: The system backup name in the object store.
+ type: string
+ required:
+ - systemBackup
+ type: object
+ status:
+ description: SystemRestoreStatus defines the observed state of the Longhorn SystemRestore
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ ownerID:
+ description: The node ID of the responsible controller to reconcile this SystemRestore.
+ type: string
+ sourceURL:
+ description: The source system backup URL.
+ type: string
+ state:
+ description: The system restore state.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: volumes.longhorn.io
+spec:
+ preserveUnknownFields: false
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+ path: /v1/webhook/conversion
+ port: 9501
+ conversionReviewVersions:
+ - v1beta2
+ - v1beta1
+ group: longhorn.io
+ names:
+ kind: Volume
+ listKind: VolumeList
+ plural: volumes
+ shortNames:
+ - lhv
+ singular: volume
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: The state of the volume
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The robustness of the volume
+ jsonPath: .status.robustness
+ name: Robustness
+ type: string
+ - description: The scheduled condition of the volume
+ jsonPath: .status.conditions['scheduled']['status']
+ name: Scheduled
+ type: string
+ - description: The size of the volume
+ jsonPath: .spec.size
+ name: Size
+ type: string
+ - description: The node that the volume is currently attaching to
+ jsonPath: .status.currentNodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Volume is where Longhorn stores volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: The state of the volume
+ jsonPath: .status.state
+ name: State
+ type: string
+ - description: The robustness of the volume
+ jsonPath: .status.robustness
+ name: Robustness
+ type: string
+ - description: The scheduled condition of the volume
+ jsonPath: .status.conditions[?(@.type=='Schedulable')].status
+ name: Scheduled
+ type: string
+ - description: The size of the volume
+ jsonPath: .spec.size
+ name: Size
+ type: string
+ - description: The node that the volume is currently attaching to
+ jsonPath: .status.currentNodeID
+ name: Node
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: Volume is where Longhorn stores volume object.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: VolumeSpec defines the desired state of the Longhorn volume
+ properties:
+ Standby:
+ type: boolean
+ accessMode:
+ enum:
+ - rwo
+ - rwx
+ type: string
+ backendStoreDriver:
+ enum:
+ - v1
+ - v2
+ type: string
+ backingImage:
+ type: string
+ backupCompressionMethod:
+ enum:
+ - none
+ - lz4
+ - gzip
+ type: string
+ dataLocality:
+ enum:
+ - disabled
+ - best-effort
+ - strict-local
+ type: string
+ dataSource:
+ type: string
+ disableFrontend:
+ type: boolean
+ diskSelector:
+ items:
+ type: string
+ type: array
+ encrypted:
+ type: boolean
+ engineImage:
+ type: string
+ fromBackup:
+ type: string
+ frontend:
+ enum:
+ - blockdev
+ - iscsi
+ - nvmf
+ - ""
+ type: string
+ lastAttachedBy:
+ type: string
+ migratable:
+ type: boolean
+ migrationNodeID:
+ type: string
+ nodeID:
+ type: string
+ nodeSelector:
+ items:
+ type: string
+ type: array
+ numberOfReplicas:
+ type: integer
+ offlineReplicaRebuilding:
+ description: OfflineReplicaRebuilding is used to determine if the offline replica rebuilding feature is enabled or not
+ enum:
+ - ignored
+ - disabled
+ - enabled
+ type: string
+ replicaAutoBalance:
+ enum:
+ - ignored
+ - disabled
+ - least-effort
+ - best-effort
+ type: string
+ replicaSoftAntiAffinity:
+ description: Replica soft anti affinity of the volume. Set enabled to allow replicas to be scheduled on the same node
+ enum:
+ - ignored
+ - enabled
+ - disabled
+ type: string
+ replicaZoneSoftAntiAffinity:
+ description: Replica zone soft anti affinity of the volume. Set enabled to allow replicas to be scheduled in the same zone
+ enum:
+ - ignored
+ - enabled
+ - disabled
+ type: string
+ restoreVolumeRecurringJob:
+ enum:
+ - ignored
+ - enabled
+ - disabled
+ type: string
+ revisionCounterDisabled:
+ type: boolean
+ size:
+ format: int64
+ type: string
+ snapshotDataIntegrity:
+ enum:
+ - ignored
+ - disabled
+ - enabled
+ - fast-check
+ type: string
+ staleReplicaTimeout:
+ type: integer
+ unmapMarkSnapChainRemoved:
+ enum:
+ - ignored
+ - disabled
+ - enabled
+ type: string
+ type: object
+ status:
+ description: VolumeStatus defines the observed state of the Longhorn volume
+ properties:
+ actualSize:
+ format: int64
+ type: integer
+ cloneStatus:
+ properties:
+ snapshot:
+ type: string
+ sourceVolume:
+ type: string
+ state:
+ type: string
+ type: object
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ currentImage:
+ type: string
+ currentMigrationNodeID:
+ description: the node that this volume is currently migrating to
+ type: string
+ currentNodeID:
+ type: string
+ expansionRequired:
+ type: boolean
+ frontendDisabled:
+ type: boolean
+ isStandby:
+ type: boolean
+ kubernetesStatus:
+ properties:
+ lastPVCRefAt:
+ type: string
+ lastPodRefAt:
+ type: string
+ namespace:
+ description: determine if PVC/Namespace is history or not
+ type: string
+ pvName:
+ type: string
+ pvStatus:
+ type: string
+ pvcName:
+ type: string
+ workloadsStatus:
+ description: determine if Pod/Workload is history or not
+ items:
+ properties:
+ podName:
+ type: string
+ podStatus:
+ type: string
+ workloadName:
+ type: string
+ workloadType:
+ type: string
+ type: object
+ nullable: true
+ type: array
+ type: object
+ lastBackup:
+ type: string
+ lastBackupAt:
+ type: string
+ lastDegradedAt:
+ type: string
+ offlineReplicaRebuildingRequired:
+ type: boolean
+ ownerID:
+ type: string
+ pendingNodeID:
+ description: Deprecated.
+ type: string
+ remountRequestedAt:
+ type: string
+ restoreInitiated:
+ type: boolean
+ restoreRequired:
+ type: boolean
+ robustness:
+ type: string
+ shareEndpoint:
+ type: string
+ shareState:
+ type: string
+ state:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ longhorn-manager: ""
+ name: volumeattachments.longhorn.io
+spec:
+ group: longhorn.io
+ names:
+ kind: VolumeAttachment
+ listKind: VolumeAttachmentList
+ plural: volumeattachments
+ shortNames:
+ - lhva
+ singular: volumeattachment
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: VolumeAttachment stores attachment information of a Longhorn volume
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: VolumeAttachmentSpec defines the desired state of Longhorn VolumeAttachment
+ properties:
+ attachmentTickets:
+ additionalProperties:
+ properties:
+ generation:
+ description: A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ id:
+ description: The unique ID of this attachment. Used to differentiate different attachments of the same volume.
+ type: string
+ nodeID:
+ description: The node that this attachment is requesting
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Optional additional parameter for this attachment
+ type: object
+ type:
+ type: string
+ type: object
+ type: object
+ volume:
+ description: The name of Longhorn volume of this VolumeAttachment
+ type: string
+ required:
+ - volume
+ type: object
+ status:
+ description: VolumeAttachmentStatus defines the observed state of Longhorn VolumeAttachment
+ properties:
+ attachmentTicketStatuses:
+ additionalProperties:
+ properties:
+ conditions:
+ description: Record any error when trying to fulfill this attachment
+ items:
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition. Can be True, False, Unknown.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ generation:
+ description: A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ id:
+ description: The unique ID of this attachment. Used to differentiate different attachments of the same volume.
+ type: string
+ satisfied:
+ description: Indicate whether this attachment ticket has been satisfied
+ type: boolean
+ required:
+ - conditions
+ - satisfied
+ type: object
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/longhorn/templates/daemonset-sa.yaml b/charts/longhorn/templates/daemonset-sa.yaml
new file mode 100644
index 0000000..f361d27
--- /dev/null
+++ b/charts/longhorn/templates/daemonset-sa.yaml
@@ -0,0 +1,151 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-manager
+ name: longhorn-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ selector:
+ matchLabels:
+ app: longhorn-manager
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-manager
+ {{- with .Values.annotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ containers:
+ - name: longhorn-manager
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: true
+ command:
+ - longhorn-manager
+ - -d
+ {{- if eq .Values.longhornManager.log.format "json" }}
+ - -j
+ {{- end }}
+ - daemon
+ - --engine-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
+ - --instance-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}"
+ - --share-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}"
+ - --backing-image-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}"
+ - --support-bundle-manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.supportBundleKit.repository }}:{{ .Values.image.longhorn.supportBundleKit.tag }}"
+ - --manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
+ - --service-account
+ - longhorn-service-account
+ ports:
+ - containerPort: 9500
+ name: manager
+ - containerPort: 9501
+ name: conversion-wh
+ - containerPort: 9502
+ name: admission-wh
+ - containerPort: 9503
+ name: recov-backend
+ readinessProbe:
+ httpGet:
+ path: /v1/healthz
+ port: 9501
+ scheme: HTTPS
+ volumeMounts:
+ - name: dev
+ mountPath: /host/dev/
+ - name: proc
+ mountPath: /host/proc/
+ - name: longhorn
+ mountPath: /var/lib/longhorn/
+ mountPropagation: Bidirectional
+ - name: longhorn-grpc-tls
+ mountPath: /tls-files/
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumes:
+ - name: dev
+ hostPath:
+ path: /dev/
+ - name: proc
+ hostPath:
+ path: /proc/
+ - name: longhorn
+ hostPath:
+ path: /var/lib/longhorn/
+ - name: longhorn-grpc-tls
+ secret:
+ secretName: longhorn-grpc-tls
+ optional: true
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornManager.priorityClass }}
+ priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornManager.tolerations }}
+{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornManager.nodeSelector }}
+{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: "100%"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-manager
+ name: longhorn-backend
+ namespace: {{ include "release_namespace" . }}
+ {{- if .Values.longhornManager.serviceAnnotations }}
+ annotations:
+{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.manager.type }}
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-manager
+ ports:
+ - name: manager
+ port: 9500
+ targetPort: manager
+ {{- if .Values.service.manager.nodePort }}
+ nodePort: {{ .Values.service.manager.nodePort }}
+ {{- end }}
diff --git a/charts/longhorn/templates/default-setting.yaml b/charts/longhorn/templates/default-setting.yaml
new file mode 100644
index 0000000..ac38ba9
--- /dev/null
+++ b/charts/longhorn/templates/default-setting.yaml
@@ -0,0 +1,83 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: longhorn-default-setting
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+data:
+ default-setting.yaml: |-
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupTarget) }}backup-target: {{ .Values.defaultSettings.backupTarget }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupTargetCredentialSecret) }}backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.allowRecurringJobWhileVolumeDetached) }}allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.createDefaultDiskLabeledNodes) }}create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataPath) }}default-data-path: {{ .Values.defaultSettings.defaultDataPath }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaSoftAntiAffinity) }}replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaAutoBalance) }}replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.storageOverProvisioningPercentage) }}storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.storageMinimalAvailablePercentage) }}storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.storageReservedPercentageForDefaultDisk) }}storage-reserved-percentage-for-default-disk: {{ .Values.defaultSettings.storageReservedPercentageForDefaultDisk }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.upgradeChecker) }}upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultReplicaCount) }}default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }}default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }}default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }}backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }}failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.restoreVolumeRecurringJobs) }}restore-volume-recurring-jobs: {{ .Values.defaultSettings.restoreVolumeRecurringJobs }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit) }}recurring-successful-jobs-history-limit: {{ .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.recurringFailedJobsHistoryLimit) }}recurring-failed-jobs-history-limit: {{ .Values.defaultSettings.recurringFailedJobsHistoryLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.supportBundleFailedHistoryLimit) }}support-bundle-failed-history-limit: {{ .Values.defaultSettings.supportBundleFailedHistoryLimit }}{{ end }}
+ {{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }}
+ taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}}
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
+ {{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
+ {{- end -}}
+ {{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}}
+ {{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}}
+ {{- end -}}
+ {{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}}
+ {{- end }}
+ {{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }}
+ system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}}
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
+ {{- $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
+ {{- end -}}
+ {{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}}
+ {{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}}
+ {{- end -}}
+ {{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}}
+ {{- end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }}priority-class: {{ .Values.defaultSettings.priorityClass }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }}auto-salvage: {{ .Values.defaultSettings.autoSalvage }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }}auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.disableSchedulingOnCordonedNode) }}disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }}replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }}node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.nodeDrainPolicy) }}node-drain-policy: {{ .Values.defaultSettings.nodeDrainPolicy }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }}replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit) }}concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit) }}concurrent-volume-backup-restore-per-node-limit: {{ .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.disableRevisionCounter) }}disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.systemManagedPodsImagePullPolicy) }}system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability) }}allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot) }}auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit) }}concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backingImageCleanupWaitInterval) }}backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backingImageRecoveryWaitInterval) }}backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.guaranteedInstanceManagerCPU) }}guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.guaranteedInstanceManagerCPU }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.kubernetesClusterAutoscalerEnabled) }}kubernetes-cluster-autoscaler-enabled: {{ .Values.defaultSettings.kubernetesClusterAutoscalerEnabled }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.orphanAutoDeletion) }}orphan-auto-deletion: {{ .Values.defaultSettings.orphanAutoDeletion }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.storageNetwork) }}storage-network: {{ .Values.defaultSettings.storageNetwork }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.deletingConfirmationFlag) }}deleting-confirmation-flag: {{ .Values.defaultSettings.deletingConfirmationFlag }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.engineReplicaTimeout) }}engine-replica-timeout: {{ .Values.defaultSettings.engineReplicaTimeout }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrity) }}snapshot-data-integrity: {{ .Values.defaultSettings.snapshotDataIntegrity }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation) }}snapshot-data-integrity-immediate-check-after-snapshot-creation: {{ .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityCronjob) }}snapshot-data-integrity-cronjob: {{ .Values.defaultSettings.snapshotDataIntegrityCronjob }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim) }}remove-snapshots-during-filesystem-trim: {{ .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.fastReplicaRebuildEnabled) }}fast-replica-rebuild-enabled: {{ .Values.defaultSettings.fastReplicaRebuildEnabled }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.replicaFileSyncHttpClientTimeout) }}replica-file-sync-http-client-timeout: {{ .Values.defaultSettings.replicaFileSyncHttpClientTimeout }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.logLevel) }}log-level: {{ .Values.defaultSettings.logLevel }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupCompressionMethod) }}backup-compression-method: {{ .Values.defaultSettings.backupCompressionMethod }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.backupConcurrentLimit) }}backup-concurrent-limit: {{ .Values.defaultSettings.backupConcurrentLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.restoreConcurrentLimit) }}restore-concurrent-limit: {{ .Values.defaultSettings.restoreConcurrentLimit }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.v2DataEngine) }}v2-data-engine: {{ .Values.defaultSettings.v2DataEngine }}{{ end }}
+ {{ if not (kindIs "invalid" .Values.defaultSettings.offlineReplicaRebuilding) }}offline-replica-rebuilding: {{ .Values.defaultSettings.offlineReplicaRebuilding }}{{ end }}
diff --git a/charts/longhorn/templates/deployment-driver.yaml b/charts/longhorn/templates/deployment-driver.yaml
new file mode 100644
index 0000000..f162fbf
--- /dev/null
+++ b/charts/longhorn/templates/deployment-driver.yaml
@@ -0,0 +1,118 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: longhorn-driver-deployer
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: longhorn-driver-deployer
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-driver-deployer
+ spec:
+ initContainers:
+ - name: wait-longhorn-manager
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
+ containers:
+ - name: longhorn-driver-deployer
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - longhorn-manager
+ - -d
+ - deploy-driver
+ - --manager-image
+ - "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}"
+ - --manager-url
+ - http://longhorn-backend:9500/v1
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ {{- if .Values.csi.kubeletRootDir }}
+ - name: KUBELET_ROOT_DIR
+ value: {{ .Values.csi.kubeletRootDir }}
+ {{- end }}
+ {{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }}
+ - name: CSI_ATTACHER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }}
+ - name: CSI_PROVISIONER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }}
+ - name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }}
+ - name: CSI_RESIZER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }}
+ - name: CSI_SNAPSHOTTER_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
+ {{- end }}
+ {{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }}
+ - name: CSI_LIVENESS_PROBE_IMAGE
+ value: "{{ template "registry_url" . }}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}"
+ {{- end }}
+ {{- if .Values.csi.attacherReplicaCount }}
+ - name: CSI_ATTACHER_REPLICA_COUNT
+ value: {{ .Values.csi.attacherReplicaCount | quote }}
+ {{- end }}
+ {{- if .Values.csi.provisionerReplicaCount }}
+ - name: CSI_PROVISIONER_REPLICA_COUNT
+ value: {{ .Values.csi.provisionerReplicaCount | quote }}
+ {{- end }}
+ {{- if .Values.csi.resizerReplicaCount }}
+ - name: CSI_RESIZER_REPLICA_COUNT
+ value: {{ .Values.csi.resizerReplicaCount | quote }}
+ {{- end }}
+ {{- if .Values.csi.snapshotterReplicaCount }}
+ - name: CSI_SNAPSHOTTER_REPLICA_COUNT
+ value: {{ .Values.csi.snapshotterReplicaCount | quote }}
+ {{- end }}
+
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornDriver.priorityClass }}
+ priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornDriver.tolerations }}
+{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornDriver.nodeSelector }}
+{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ securityContext:
+ runAsUser: 0
diff --git a/charts/longhorn/templates/deployment-ui.yaml b/charts/longhorn/templates/deployment-ui.yaml
new file mode 100644
index 0000000..6bad5cd
--- /dev/null
+++ b/charts/longhorn/templates/deployment-ui.yaml
@@ -0,0 +1,114 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-ui
+ name: longhorn-ui
+ namespace: {{ include "release_namespace" . }}
+spec:
+ replicas: {{ .Values.longhornUI.replicas }}
+ selector:
+ matchLabels:
+ app: longhorn-ui
+ template:
+ metadata:
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ app: longhorn-ui
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - longhorn-ui
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: longhorn-ui
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ volumeMounts:
+ - name: nginx-cache
+ mountPath: /var/cache/nginx/
+ - name: nginx-config
+ mountPath: /var/config/nginx/
+ - name: var-run
+ mountPath: /var/run/
+ ports:
+ - containerPort: 8000
+ name: http
+ env:
+ - name: LONGHORN_MANAGER_IP
+ value: "http://longhorn-backend:9500"
+ - name: LONGHORN_UI_PORT
+ value: "8000"
+ volumes:
+ - emptyDir: {}
+ name: nginx-cache
+ - emptyDir: {}
+ name: nginx-config
+ - emptyDir: {}
+ name: var-run
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornUI.priorityClass }}
+ priorityClassName: {{ .Values.longhornUI.priorityClass | quote }}
+ {{- end }}
+ {{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornUI.tolerations }}
+{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornUI.nodeSelector }}
+{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+---
+kind: Service
+apiVersion: v1
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-ui
+ {{- if eq .Values.service.ui.type "Rancher-Proxy" }}
+ kubernetes.io/cluster-service: "true"
+ {{- end }}
+ name: longhorn-frontend
+ namespace: {{ include "release_namespace" . }}
+spec:
+ {{- if eq .Values.service.ui.type "Rancher-Proxy" }}
+ type: ClusterIP
+ {{- else }}
+ type: {{ .Values.service.ui.type }}
+ {{- end }}
+ {{- if and .Values.service.ui.loadBalancerIP (eq .Values.service.ui.type "LoadBalancer") }}
+ loadBalancerIP: {{ .Values.service.ui.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.service.ui.type "LoadBalancer") .Values.service.ui.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{- toYaml .Values.service.ui.loadBalancerSourceRanges | nindent 4 }}
+ {{- end }}
+ selector:
+ app: longhorn-ui
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ {{- if .Values.service.ui.nodePort }}
+ nodePort: {{ .Values.service.ui.nodePort }}
+ {{- else }}
+ nodePort: null
+ {{- end }}
diff --git a/charts/longhorn/templates/ingress.yaml b/charts/longhorn/templates/ingress.yaml
new file mode 100644
index 0000000..ee47f8b
--- /dev/null
+++ b/charts/longhorn/templates/ingress.yaml
@@ -0,0 +1,48 @@
+{{- if .Values.ingress.enabled }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: longhorn-ingress
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-ingress
+ annotations:
+ {{- if .Values.ingress.secureBackends }}
+ ingress.kubernetes.io/secure-backends: "true"
+ {{- end }}
+ {{- range $key, $value := .Values.ingress.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ {{- if and .Values.ingress.ingressClassName (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+ ingressClassName: {{ .Values.ingress.ingressClassName }}
+ {{- end }}
+ rules:
+ - host: {{ .Values.ingress.host }}
+ http:
+ paths:
+ - path: {{ default "" .Values.ingress.path }}
+ {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+ pathType: ImplementationSpecific
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: longhorn-frontend
+ port:
+ number: 80
+ {{- else }}
+ serviceName: longhorn-frontend
+ servicePort: 80
+ {{- end }}
+{{- if .Values.ingress.tls }}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.host }}
+ secretName: {{ .Values.ingress.tlsSecret }}
+{{- end }}
+{{- end }}
diff --git a/charts/longhorn/templates/network-policies/backing-image-data-source-network-policy.yaml b/charts/longhorn/templates/network-policies/backing-image-data-source-network-policy.yaml
new file mode 100644
index 0000000..cc91054
--- /dev/null
+++ b/charts/longhorn/templates/network-policies/backing-image-data-source-network-policy.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.networkPolicies.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: backing-image-data-source
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-data-source
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app: longhorn-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: instance-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-data-source
+{{- end }}
diff --git a/charts/longhorn/templates/network-policies/backing-image-manager-network-policy.yaml b/charts/longhorn/templates/network-policies/backing-image-manager-network-policy.yaml
new file mode 100644
index 0000000..ebc288f
--- /dev/null
+++ b/charts/longhorn/templates/network-policies/backing-image-manager-network-policy.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.networkPolicies.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: backing-image-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app: longhorn-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: instance-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-data-source
+{{- end }}
diff --git a/charts/longhorn/templates/network-policies/instance-manager-networking.yaml b/charts/longhorn/templates/network-policies/instance-manager-networking.yaml
new file mode 100644
index 0000000..6f03c6e
--- /dev/null
+++ b/charts/longhorn/templates/network-policies/instance-manager-networking.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.networkPolicies.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: instance-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ longhorn.io/component: instance-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app: longhorn-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: instance-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-manager
+ - podSelector:
+ matchLabels:
+ longhorn.io/component: backing-image-data-source
+{{- end }}
diff --git a/charts/longhorn/templates/network-policies/manager-network-policy.yaml b/charts/longhorn/templates/network-policies/manager-network-policy.yaml
new file mode 100644
index 0000000..c9d763f
--- /dev/null
+++ b/charts/longhorn/templates/network-policies/manager-network-policy.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.networkPolicies.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: longhorn-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ app: longhorn-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app: longhorn-manager
+ - podSelector:
+ matchLabels:
+ app: longhorn-ui
+ - podSelector:
+ matchLabels:
+ app: longhorn-csi-plugin
+ - podSelector:
+ matchLabels:
+ longhorn.io/managed-by: longhorn-manager
+ matchExpressions:
+ - { key: recurring-job.longhorn.io, operator: Exists }
+ - podSelector:
+ matchExpressions:
+ - { key: longhorn.io/job-task, operator: Exists }
+ - podSelector:
+ matchLabels:
+ app: longhorn-driver-deployer
+{{- end }}
diff --git a/charts/longhorn/templates/network-policies/recovery-backend-network-policy.yaml b/charts/longhorn/templates/network-policies/recovery-backend-network-policy.yaml
new file mode 100644
index 0000000..cebe485
--- /dev/null
+++ b/charts/longhorn/templates/network-policies/recovery-backend-network-policy.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.networkPolicies.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: longhorn-recovery-backend
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ app: longhorn-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 9503
+{{- end }}
diff --git a/charts/longhorn/templates/network-policies/ui-frontend-network-policy.yaml b/charts/longhorn/templates/network-policies/ui-frontend-network-policy.yaml
new file mode 100644
index 0000000..04c8beb
--- /dev/null
+++ b/charts/longhorn/templates/network-policies/ui-frontend-network-policy.yaml
@@ -0,0 +1,46 @@
+{{- if and .Values.networkPolicies.enabled .Values.ingress.enabled (not (eq .Values.networkPolicies.type "")) }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: longhorn-ui-frontend
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ app: longhorn-ui
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ {{- if eq .Values.networkPolicies.type "rke1"}}
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ {{- else if eq .Values.networkPolicies.type "rke2" }}
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: rke2-ingress-nginx
+ app.kubernetes.io/name: rke2-ingress-nginx
+ {{- else if eq .Values.networkPolicies.type "k3s" }}
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: traefik
+ ports:
+ - port: 8000
+ protocol: TCP
+ - port: 80
+ protocol: TCP
+ {{- end }}
+{{- end }}
diff --git a/charts/longhorn/templates/network-policies/webhook-network-policy.yaml b/charts/longhorn/templates/network-policies/webhook-network-policy.yaml
new file mode 100644
index 0000000..c9790f6
--- /dev/null
+++ b/charts/longhorn/templates/network-policies/webhook-network-policy.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.networkPolicies.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ app: longhorn-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 9501
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: longhorn-admission-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ podSelector:
+ matchLabels:
+ app: longhorn-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 9502
+{{- end }}
diff --git a/charts/longhorn/templates/postupgrade-job.yaml b/charts/longhorn/templates/postupgrade-job.yaml
new file mode 100644
index 0000000..bb25a54
--- /dev/null
+++ b/charts/longhorn/templates/postupgrade-job.yaml
@@ -0,0 +1,56 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": post-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
+ name: longhorn-post-upgrade
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ activeDeadlineSeconds: 900
+ backoffLimit: 1
+ template:
+ metadata:
+ name: longhorn-post-upgrade
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: longhorn-post-upgrade
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - longhorn-manager
+ - post-upgrade
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ restartPolicy: OnFailure
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornManager.priorityClass }}
+ priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornManager.tolerations }}
+{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornManager.nodeSelector }}
+{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/longhorn/templates/preupgrade-job.yaml b/charts/longhorn/templates/preupgrade-job.yaml
new file mode 100644
index 0000000..357e6d7
--- /dev/null
+++ b/charts/longhorn/templates/preupgrade-job.yaml
@@ -0,0 +1,58 @@
+{{- if .Values.helmPreUpgradeCheckerJob.enabled }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": pre-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
+ name: longhorn-pre-upgrade
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ activeDeadlineSeconds: 900
+ backoffLimit: 1
+ template:
+ metadata:
+ name: longhorn-pre-upgrade
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: longhorn-pre-upgrade
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - longhorn-manager
+ - pre-upgrade
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ restartPolicy: OnFailure
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornManager.priorityClass }}
+ priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornManager.tolerations }}
+{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.longhornManager.nodeSelector }}
+{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/longhorn/templates/psp.yaml b/charts/longhorn/templates/psp.yaml
new file mode 100644
index 0000000..a2dfc05
--- /dev/null
+++ b/charts/longhorn/templates/psp.yaml
@@ -0,0 +1,66 @@
+{{- if .Values.enablePSP }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: longhorn-psp
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ requiredDropCapabilities:
+ - NET_RAW
+ allowedCapabilities:
+ - SYS_ADMIN
+ hostNetwork: false
+ hostIPC: false
+ hostPID: true
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ fsGroup:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - configMap
+ - downwardAPI
+ - emptyDir
+ - secret
+ - projected
+ - hostPath
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: longhorn-psp-role
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ namespace: {{ include "release_namespace" . }}
+rules:
+- apiGroups:
+ - policy
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ resourceNames:
+ - longhorn-psp
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: longhorn-psp-binding
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ namespace: {{ include "release_namespace" . }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: longhorn-psp-role
+subjects:
+- kind: ServiceAccount
+ name: longhorn-service-account
+ namespace: {{ include "release_namespace" . }}
+- kind: ServiceAccount
+ name: default
+ namespace: {{ include "release_namespace" . }}
+{{- end }}
diff --git a/charts/longhorn/templates/registry-secret.yaml b/charts/longhorn/templates/registry-secret.yaml
new file mode 100644
index 0000000..3c6b1dc
--- /dev/null
+++ b/charts/longhorn/templates/registry-secret.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.privateRegistry.createSecret }}
+{{- if .Values.privateRegistry.registrySecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.privateRegistry.registrySecret }}
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ template "secret" . }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/longhorn/templates/serviceaccount.yaml b/charts/longhorn/templates/serviceaccount.yaml
new file mode 100644
index 0000000..a563d68
--- /dev/null
+++ b/charts/longhorn/templates/serviceaccount.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: longhorn-service-account
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: longhorn-support-bundle
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
\ No newline at end of file
diff --git a/charts/longhorn/templates/services.yaml b/charts/longhorn/templates/services.yaml
new file mode 100644
index 0000000..7da9d18
--- /dev/null
+++ b/charts/longhorn/templates/services.yaml
@@ -0,0 +1,74 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-conversion-webhook
+ name: longhorn-conversion-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ type: ClusterIP
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-manager
+ ports:
+ - name: conversion-webhook
+ port: 9501
+ targetPort: conversion-wh
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-admission-webhook
+ name: longhorn-admission-webhook
+ namespace: {{ include "release_namespace" . }}
+spec:
+ type: ClusterIP
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-manager
+ ports:
+ - name: admission-webhook
+ port: 9502
+ targetPort: admission-wh
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ app: longhorn-recovery-backend
+ name: longhorn-recovery-backend
+ namespace: {{ include "release_namespace" . }}
+spec:
+ type: ClusterIP
+ sessionAffinity: ClientIP
+ selector:
+ app: longhorn-manager
+ ports:
+ - name: recovery-backend
+ port: 9503
+ targetPort: recov-backend
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ name: longhorn-engine-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ clusterIP: None
+ selector:
+ longhorn.io/component: instance-manager
+ longhorn.io/instance-manager-type: engine
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+ name: longhorn-replica-manager
+ namespace: {{ include "release_namespace" . }}
+spec:
+ clusterIP: None
+ selector:
+ longhorn.io/component: instance-manager
+ longhorn.io/instance-manager-type: replica
diff --git a/charts/longhorn/templates/storageclass.yaml b/charts/longhorn/templates/storageclass.yaml
new file mode 100644
index 0000000..6832517
--- /dev/null
+++ b/charts/longhorn/templates/storageclass.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: longhorn-storageclass
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+data:
+ storageclass.yaml: |
+ kind: StorageClass
+ apiVersion: storage.k8s.io/v1
+ metadata:
+ name: longhorn
+ annotations:
+ storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }}
+ provisioner: driver.longhorn.io
+ allowVolumeExpansion: true
+ reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}"
+ volumeBindingMode: Immediate
+ parameters:
+ numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}"
+ staleReplicaTimeout: "30"
+ fromBackup: ""
+ {{- if .Values.persistence.defaultFsType }}
+ fsType: "{{ .Values.persistence.defaultFsType }}"
+ {{- end }}
+ {{- if .Values.persistence.defaultMkfsParams }}
+ mkfsParams: "{{ .Values.persistence.defaultMkfsParams }}"
+ {{- end }}
+ {{- if .Values.persistence.migratable }}
+ migratable: "{{ .Values.persistence.migratable }}"
+ {{- end }}
+ {{- if .Values.persistence.backingImage.enable }}
+ backingImage: {{ .Values.persistence.backingImage.name }}
+ backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }}
+ backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }}
+ backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }}
+ {{- end }}
+ {{- if .Values.persistence.recurringJobSelector.enable }}
+ recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}'
+ {{- end }}
+ dataLocality: {{ .Values.persistence.defaultDataLocality | quote }}
+ {{- if .Values.persistence.defaultNodeSelector.enable }}
+ nodeSelector: "{{ .Values.persistence.defaultNodeSelector.selector }}"
+ {{- end }}
diff --git a/charts/longhorn/templates/tls-secrets.yaml b/charts/longhorn/templates/tls-secrets.yaml
new file mode 100644
index 0000000..74c4342
--- /dev/null
+++ b/charts/longhorn/templates/tls-secrets.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .name }}
+ namespace: {{ include "release_namespace" $ }}
+ labels: {{- include "longhorn.labels" $ | nindent 4 }}
+ app: longhorn
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .certificate | b64enc }}
+ tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
diff --git a/charts/longhorn/templates/uninstall-job.yaml b/charts/longhorn/templates/uninstall-job.yaml
new file mode 100644
index 0000000..968f420
--- /dev/null
+++ b/charts/longhorn/templates/uninstall-job.yaml
@@ -0,0 +1,57 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": pre-delete
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ name: longhorn-uninstall
+ namespace: {{ include "release_namespace" . }}
+ labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+ activeDeadlineSeconds: 900
+ backoffLimit: 1
+ template:
+ metadata:
+ name: longhorn-uninstall
+ labels: {{- include "longhorn.labels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: longhorn-uninstall
+ image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - longhorn-manager
+ - uninstall
+ - --force
+ env:
+ - name: LONGHORN_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ restartPolicy: Never
+ {{- if .Values.privateRegistry.registrySecret }}
+ imagePullSecrets:
+ - name: {{ .Values.privateRegistry.registrySecret }}
+ {{- end }}
+ {{- if .Values.longhornManager.priorityClass }}
+ priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
+ {{- end }}
+ serviceAccountName: longhorn-service-account
+ {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
+ tolerations:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+ {{- end }}
+ {{- if .Values.longhornManager.tolerations }}
+{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
+ nodeSelector:
+ {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+ {{- end }}
+      {{- if .Values.longhornManager.nodeSelector }}
+{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/longhorn/templates/validate-psp-install.yaml b/charts/longhorn/templates/validate-psp-install.yaml
new file mode 100644
index 0000000..0df98e3
--- /dev/null
+++ b/charts/longhorn/templates/validate-psp-install.yaml
@@ -0,0 +1,7 @@
+#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
+#{{- if .Values.enablePSP }}
+#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
+#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
+#{{- end }}
+#{{- end }}
+#{{- end }}
\ No newline at end of file
diff --git a/charts/longhorn/values.yaml b/charts/longhorn/values.yaml
new file mode 100644
index 0000000..bad9882
--- /dev/null
+++ b/charts/longhorn/values.yaml
@@ -0,0 +1,296 @@
+# Default values for longhorn.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+global:
+ cattle:
+ systemDefaultRegistry: ""
+ windowsCluster:
+ # Enable this to allow Longhorn to run on the Rancher deployed Windows cluster
+ enabled: false
+ # Tolerate Linux node taint
+ tolerations:
+ - key: "cattle.io/os"
+ value: "linux"
+ effect: "NoSchedule"
+ operator: "Equal"
+ # Select Linux nodes
+ nodeSelector:
+ kubernetes.io/os: "linux"
+ # Recognize toleration and node selector for Longhorn run-time created components
+ defaultSetting:
+ taintToleration: cattle.io/os=linux:NoSchedule
+ systemManagedComponentsNodeSelector: kubernetes.io/os:linux
+
+networkPolicies:
+ enabled: false
+ # Available types: k3s, rke2, rke1
+ type: "k3s"
+
+image:
+ longhorn:
+ engine:
+ repository: longhornio/longhorn-engine
+ tag: v1.5.2
+ manager:
+ repository: longhornio/longhorn-manager
+ tag: v1.5.2
+ ui:
+ repository: longhornio/longhorn-ui
+ tag: v1.5.2
+ instanceManager:
+ repository: longhornio/longhorn-instance-manager
+ tag: v1.5.2
+ shareManager:
+ repository: longhornio/longhorn-share-manager
+ tag: v1.5.2
+ backingImageManager:
+ repository: longhornio/backing-image-manager
+ tag: v1.5.2
+ supportBundleKit:
+ repository: longhornio/support-bundle-kit
+ tag: v0.0.27
+ csi:
+ attacher:
+ repository: longhornio/csi-attacher
+ tag: v4.2.0
+ provisioner:
+ repository: longhornio/csi-provisioner
+ tag: v3.4.1
+ nodeDriverRegistrar:
+ repository: longhornio/csi-node-driver-registrar
+ tag: v2.7.0
+ resizer:
+ repository: longhornio/csi-resizer
+ tag: v1.7.0
+ snapshotter:
+ repository: longhornio/csi-snapshotter
+ tag: v6.2.1
+ livenessProbe:
+ repository: longhornio/livenessprobe
+ tag: v2.9.0
+ pullPolicy: IfNotPresent
+
+service:
+ ui:
+ type: ClusterIP
+ nodePort: null
+ manager:
+ type: ClusterIP
+ nodePort: ""
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: ""
+
+persistence:
+ defaultClass: true
+ defaultFsType: ext4
+ defaultMkfsParams: ""
+ defaultClassReplicaCount: 3
+ defaultDataLocality: disabled # best-effort otherwise
+ reclaimPolicy: Delete
+ migratable: false
+ recurringJobSelector:
+ enable: false
+ jobList: []
+ backingImage:
+ enable: false
+ name: ~
+ dataSourceType: ~
+ dataSourceParameters: ~
+ expectedChecksum: ~
+ defaultNodeSelector:
+ enable: false # disable by default
+ selector: ""
+ removeSnapshotsDuringFilesystemTrim: ignored # "enabled" or "disabled" otherwise
+
+helmPreUpgradeCheckerJob:
+ enabled: true
+
+csi:
+ kubeletRootDir: ~
+ attacherReplicaCount: ~
+ provisionerReplicaCount: ~
+ resizerReplicaCount: ~
+ snapshotterReplicaCount: ~
+
+defaultSettings:
+ backupTarget: ~
+ backupTargetCredentialSecret: ~
+ allowRecurringJobWhileVolumeDetached: ~
+ createDefaultDiskLabeledNodes: ~
+ defaultDataPath: ~
+ defaultDataLocality: ~
+ replicaSoftAntiAffinity: ~
+ replicaAutoBalance: ~
+ storageOverProvisioningPercentage: ~
+ storageMinimalAvailablePercentage: ~
+ storageReservedPercentageForDefaultDisk: ~
+ upgradeChecker: ~
+ defaultReplicaCount: ~
+ defaultLonghornStaticStorageClass: ~
+ backupstorePollInterval: ~
+ failedBackupTTL: ~
+ restoreVolumeRecurringJobs: ~
+ recurringSuccessfulJobsHistoryLimit: ~
+ recurringFailedJobsHistoryLimit: ~
+ supportBundleFailedHistoryLimit: ~
+ taintToleration: ~
+ systemManagedComponentsNodeSelector: ~
+ priorityClass: ~
+ autoSalvage: ~
+ autoDeletePodWhenVolumeDetachedUnexpectedly: ~
+ disableSchedulingOnCordonedNode: ~
+ replicaZoneSoftAntiAffinity: ~
+ nodeDownPodDeletionPolicy: ~
+ nodeDrainPolicy: ~
+ replicaReplenishmentWaitInterval: ~
+ concurrentReplicaRebuildPerNodeLimit: ~
+ concurrentVolumeBackupRestorePerNodeLimit: ~
+ disableRevisionCounter: ~
+ systemManagedPodsImagePullPolicy: ~
+ allowVolumeCreationWithDegradedAvailability: ~
+ autoCleanupSystemGeneratedSnapshot: ~
+ concurrentAutomaticEngineUpgradePerNodeLimit: ~
+ backingImageCleanupWaitInterval: ~
+ backingImageRecoveryWaitInterval: ~
+ guaranteedInstanceManagerCPU: ~
+ kubernetesClusterAutoscalerEnabled: ~
+ orphanAutoDeletion: ~
+ storageNetwork: ~
+ deletingConfirmationFlag: ~
+ engineReplicaTimeout: ~
+ snapshotDataIntegrity: ~
+ snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
+ snapshotDataIntegrityCronjob: ~
+ removeSnapshotsDuringFilesystemTrim: ~
+ fastReplicaRebuildEnabled: ~
+ replicaFileSyncHttpClientTimeout: ~
+ logLevel: ~
+ backupCompressionMethod: ~
+ backupConcurrentLimit: ~
+ restoreConcurrentLimit: ~
+ v2DataEngine: ~
+ offlineReplicaRebuilding: ~
+privateRegistry:
+ createSecret: ~
+ registryUrl: ~
+ registryUser: ~
+ registryPasswd: ~
+ registrySecret: ~
+
+longhornManager:
+ log:
+ ## Allowed values are `plain` or `json`.
+ format: plain
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+ serviceAnnotations: {}
+ ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
+ ## and uncomment this example block
+ # annotation-key1: "annotation-value1"
+ # annotation-key2: "annotation-value2"
+
+longhornDriver:
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+
+longhornUI:
+ replicas: 2
+ priorityClass: ~
+ tolerations: []
+ ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
+ ## and uncomment this example block
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ nodeSelector: {}
+ ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
+ ## and uncomment this example block
+ # label-key1: "label-value1"
+ # label-key2: "label-value2"
+
+ingress:
+ ## Set to true to enable ingress record generation
+ enabled: false
+
+ ## Add ingressClassName to the Ingress
+ ## Can replace the kubernetes.io/ingress.class annotation on v1.18+
+ ingressClassName: ~
+
+ host: sslip.io
+
+ ## Set this to true in order to enable TLS on the ingress record
+ tls: false
+
+ ## Enable this in order to enable that the backend service will be connected at port 443
+ secureBackends: false
+
+ ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+ tlsSecret: longhorn.local-tls
+
+ ## If ingress is enabled you can set the default ingress path
+ ## then you can access the UI by using the following full path {{host}}+{{path}}
+ path: /
+
+ ## Ingress annotations done as key:value pairs
+ ## If you're using kube-lego, you will want to add:
+ ## kubernetes.io/tls-acme: true
+ ##
+ ## For a full list of possible ingress annotations, please see
+ ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
+ ##
+ ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+ annotations:
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: true
+
+ secrets:
+ ## If you're providing your own certificates, please use this to add the certificates as secrets
+ ## key and certificate should start with -----BEGIN CERTIFICATE----- or
+ ## -----BEGIN RSA PRIVATE KEY-----
+ ##
+ ## name should line up with a tlsSecret set further up
+ ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
+ ##
+ ## It is also possible to create and manage the certificates outside of this helm chart
+ ## Please see README.md for more information
+ # - name: longhorn.local-tls
+ # key:
+ # certificate:
+
+# For Kubernetes < v1.25, if your cluster enables Pod Security Policy admission controller,
+# set this to `true` to ship longhorn-psp which allow privileged Longhorn pods to start
+enablePSP: false
+
+## Specify override namespace, specifically this is useful for using longhorn as sub-chart
+## and its release namespace is not the `longhorn-system`
+namespaceOverride: ""
+
+# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
+annotations: {}
+
+serviceAccount:
+ # Annotations to add to the service account
+ annotations: {}
diff --git a/charts/maddy/.helmignore b/charts/maddy/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/maddy/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/maddy/Chart.yaml b/charts/maddy/Chart.yaml
new file mode 100644
index 0000000..8476282
--- /dev/null
+++ b/charts/maddy/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: maddy
+description: A Helm chart for Maddy email server running on PCloud
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/maddy/templates/config.yaml b/charts/maddy/templates/config.yaml
new file mode 100644
index 0000000..166adc3
--- /dev/null
+++ b/charts/maddy/templates/config.yaml
@@ -0,0 +1,122 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: config
+ namespace: {{ .Release.Namespace }}
+data:
+ maddy.conf: |
+ $(hostname) = mail.{{ .Values.ingress.public.domain }}
+ $(primary_domain) = {{ .Values.ingress.public.domain }}
+ $(local_domains) = $(primary_domain)
+
+ auth.pass_table local_authdb {
+ table sql_table {
+ driver sqlite3
+ dsn credentials.db
+ table_name passwords
+ }
+ }
+
+ storage.imapsql local_mailboxes {
+ driver sqlite3
+ dsn imapsql.db
+ }
+
+ hostname $(hostname)
+
+ msgpipeline local_routing {
+ destination postmaster $(local_domains) {
+ modify {
+ replace_rcpt regexp "(.+)\+(.+)@(.+)" "$1@$3"
+ replace_rcpt file /etc/maddy/aliases
+ }
+
+ deliver_to &local_mailboxes
+ }
+
+ default_destination {
+ reject 550 5.1.1 "User doesn't exist"
+ }
+ }
+
+ smtp tcp://0.0.0.0:25 {
+ tls off
+
+ limits {
+ # Up to 20 msgs/sec across max. 10 SMTP connections.
+ all rate 20 1s
+ all concurrency 10
+ }
+
+ dmarc yes
+ check {
+ require_mx_record
+ dkim
+ spf
+ }
+
+ source $(local_domains) {
+ reject 501 5.1.8 "Use Submission for outgoing SMTP"
+ }
+ default_source {
+ destination postmaster $(local_domains) {
+ deliver_to &local_routing
+ }
+ default_destination {
+ reject 550 5.1.1 "User doesn't exist"
+ }
+ }
+ }
+
+ submission tls://0.0.0.0:465 tcp://0.0.0.0:587 {
+ tls file /etc/maddy/certs-private/tls.crt /etc/maddy/certs-private/tls.key
+
+ limits {
+ # Up to 50 msgs/sec across any amount of SMTP connections.
+ all rate 50 1s
+ }
+
+ auth &local_authdb
+
+ source $(local_domains) {
+ destination postmaster $(local_domains) {
+ deliver_to &local_routing
+ }
+ default_destination {
+ # modify {
+ # dkim $(primary_domain) $(local_domains) default
+ # }
+ deliver_to &remote_queue
+ }
+ }
+ default_source {
+ reject 501 5.1.8 "Non-local sender domain"
+ }
+ }
+
+ target.smtp outbound_gateway {
+ hostname {{ .Values.mailGateway.mxHostname }}
+ attempt_starttls false
+ require_tls no
+ auth forward
+      targets {{ .Values.mailGateway.address }}
+ }
+
+ target.queue remote_queue {
+ target &outbound_gateway
+ autogenerated_msg_domain $(primary_domain)
+ bounce {
+ destination postmaster $(local_domains) {
+ deliver_to &local_routing
+ }
+ default_destination {
+ reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
+ }
+ }
+ }
+
+ imap tls://0.0.0.0:993 tcp://0.0.0.0:143 {
+ tls file /etc/maddy/certs-private/tls.crt /etc/maddy/certs-private/tls.key
+ auth &local_authdb
+ storage &local_mailboxes
+ }
diff --git a/charts/maddy/templates/install.yaml b/charts/maddy/templates/install.yaml
new file mode 100644
index 0000000..ccc423a
--- /dev/null
+++ b/charts/maddy/templates/install.yaml
@@ -0,0 +1,176 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: maddy
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: maddy
+ ports:
+ - name: imap
+ port: 143
+ protocol: TCP
+ - name: imaps
+ port: 993
+ protocol: TCP
+ - name: smtp
+ port: 25
+ protocol: TCP
+ - name: smtps
+ port: 465
+ protocol: TCP
+ - name: submission
+ port: 587
+ protocol: TCP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: manage
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: maddy
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: manage
+ namespace: {{ .Release.Namespace }}
+spec:
+ ingressClassName: {{ .Values.ingress.private.className }}
+ tls:
+ - hosts:
+ - mail.{{ .Values.ingress.private.domain }}
+ secretName: cert-wildcard.{{ .Values.ingress.private.domain }}
+ rules:
+ - host: mail.{{ .Values.ingress.private.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: manage
+ port:
+ name: http
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: mail.{{ .Values.ingress.public.domain }}
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/resource-policy: keep
+spec:
+ dnsNames:
+ - 'mail.{{ .Values.ingress.public.domain }}'
+ issuerRef:
+ name: {{ .Values.ingress.public.certificateIssuer }}
+ kind: ClusterIssuer
+ secretName: cert-mail.{{ .Values.ingress.public.domain }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: data
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.size }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: maddy
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: maddy
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: maddy
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
+ spec:
+ volumes:
+ - name: config
+ configMap:
+ name: config
+ - name: certs
+ secret:
+          secretName: cert-mail.{{ .Values.ingress.public.domain }}
+ - name: certs-private
+ secret:
+          secretName: cert-wildcard.{{ .Values.ingress.private.domain }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ containers:
+ - name: maddy
+ image: giolekva/maddy:v0.4.4
+ imagePullPolicy: Always
+ ports:
+ - name: imap
+ containerPort: 143
+ protocol: TCP
+ - name: imaps
+ containerPort: 993
+ protocol: TCP
+ - name: smtp
+ containerPort: 25
+ protocol: TCP
+ - name: smtps
+ containerPort: 465
+ protocol: TCP
+ - name: submission
+ containerPort: 587
+ protocol: TCP
+ command:
+ - maddy
+ - -config
+ - /etc/maddy/config/maddy.conf
+ volumeMounts:
+ - name: config
+ mountPath: /etc/maddy/config
+ readOnly: true
+ - name: certs-private
+ mountPath: /etc/maddy/certs-private
+ readOnly: true
+ - name: data
+ mountPath: /var/lib/maddy
+ readOnly: false
+ - name: web
+ image: giolekva/maddy-web:latest
+ imagePullPolicy: Always
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ command:
+ - maddy-web
+ - --port=80
+ - --maddy-config=/etc/maddy/config/maddy.conf
+ volumeMounts:
+ - name: config
+ mountPath: /etc/maddy/config
+ readOnly: true
+ - name: certs
+ mountPath: /etc/maddy/certs
+ readOnly: true
+ - name: data
+ mountPath: /var/lib/maddy
+ readOnly: false
diff --git a/charts/maddy/templates/mta-sts.yaml b/charts/maddy/templates/mta-sts.yaml
new file mode 100644
index 0000000..c44e3dd
--- /dev/null
+++ b/charts/maddy/templates/mta-sts.yaml
@@ -0,0 +1,101 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: mta-sts
+ namespace: {{ .Release.Namespace }}
+data:
+ mta-sts.txt: |
+ version: STSv1
+ mode: enforce
+ max_age: 604800
+ mx: {{ .Values.mailGateway.mxHostname }}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: mta-sts.{{ .Values.ingress.public.domain }}
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/resource-policy: keep
+spec:
+ dnsNames:
+ - 'mta-sts.{{ .Values.ingress.public.domain }}'
+ issuerRef:
+ name: {{ .Values.ingress.public.certificateIssuer }}
+ kind: ClusterIssuer
+ secretName: cert-mta-sts.{{ .Values.ingress.public.domain }}
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: mta-sts
+ namespace: {{ .Release.Namespace }}
+spec:
+ ingressClassName: {{ .Values.ingress.public.className }}
+ tls:
+ - hosts:
+ - mta-sts.{{ .Values.ingress.public.domain }}
+ secretName: cert-mta-sts.{{ .Values.ingress.public.domain }}
+ rules:
+ - host: mta-sts.{{ .Values.ingress.public.domain }}
+ http:
+ paths:
+ - pathType: Prefix
+ path: "/"
+ backend:
+ service:
+ name: mta-sts
+ port:
+ name: http
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: mta-sts
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: mta-sts
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: mta-sts
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: mta-sts
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: mta-sts
+ spec:
+ volumes:
+ - name: mta-sts
+ configMap:
+ name: mta-sts
+ containers:
+ - name: maddy
+ image: giolekva/static-file-server:latest
+ imagePullPolicy: Always
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ command:
+ - static-file-server
+ - --port=80
+ - --dir=/etc/static-file-server/data
+ volumeMounts:
+ - name: mta-sts
+ mountPath: /etc/static-file-server/data/.well-known
+ readOnly: true
diff --git a/charts/maddy/values.yaml b/charts/maddy/values.yaml
new file mode 100644
index 0000000..984109e
--- /dev/null
+++ b/charts/maddy/values.yaml
@@ -0,0 +1,13 @@
+mailGateway:
+ mxHostname: mail.example.com
+ address: tcp://maddy.pcloud-mail-gateway.svc.cluster.local:587
+ingress:
+ private:
+ className: ingress-private
+ domain: p.example.com
+ public:
+ className: ingress-public
+ domain: example.com
+ certificateIssuer: issuer-public
+storage:
+ size: 1Gi
diff --git a/charts/mail-gateway/.helmignore b/charts/mail-gateway/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/mail-gateway/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/mail-gateway/Chart.yaml b/charts/mail-gateway/Chart.yaml
new file mode 100644
index 0000000..8a533ea
--- /dev/null
+++ b/charts/mail-gateway/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: mail-gateway
+description: A Helm chart for Email gateway running on PCloud
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/mail-gateway/templates/config.yaml b/charts/mail-gateway/templates/config.yaml
new file mode 100644
index 0000000..457f524
--- /dev/null
+++ b/charts/mail-gateway/templates/config.yaml
@@ -0,0 +1,144 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: maddy
+ namespace: {{ .Release.Namespace }}
+data:
+ smtp-servers.conf: |
+ maddy.{{ .Values.domains.primary.namespace }}.svc.cluster.local:587
+ {{ range .Values.domains.others }}
+ maddy.{{ .namespace }}.svc.cluster.local:587
+ {{ end }}
+ maddy.conf: |
+ $(hostname) = {{ .Values.domains.primary.mx }}
+ $(primary_domain) = {{ .Values.domains.primary.name }}
+ $(local_domains) = {{ .Values.domains.primary.name }}{{ range .Values.domains.others }} {{ .name }}{{ end }}
+
+ tls file /etc/maddy/certs/tls.crt /etc/maddy/certs/tls.key
+
+ auth.external authsmtp {
+ helper /usr/bin/auth-smtp
+ perdomain yes
+ domains $(local_domains)
+ }
+
+ hostname $(hostname)
+
+ msgpipeline local_routing {
+ destination {{ .Values.domains.primary.name }} {
+ deliver_to &{{ .Values.domains.primary.name }}
+ }
+ {{ range .Values.domains.others }}
+ destination {{ .name }} {
+ deliver_to &{{ .name }}
+ }
+ {{ end }}
+ default_destination {
+ reject 550 5.1.1 "User doesn't exist"
+ }
+ }
+
+ smtp tcp://0.0.0.0:25 {
+ insecure_auth no
+
+ defer_sender_reject yes
+
+ limits {
+ # Up to 20 msgs/sec across max. 10 SMTP connections.
+ all rate 20 1s
+ all concurrency 10
+ }
+
+ dmarc yes
+ check {
+ require_mx_record
+ dkim
+ spf
+ }
+
+ source $(local_domains) {
+ reject 501 5.1.8 "Use Submission for outgoing SMTP"
+ }
+ default_source {
+ destination $(local_domains) {
+ deliver_to &local_routing
+ }
+ default_destination {
+ reject 550 5.1.1 "User doesn't exist"
+ }
+ }
+ }
+
+ submission tls://0.0.0.0:465 tcp://0.0.0.0:587 {
+ auth &authsmtp
+ insecure_auth yes
+
+ defer_sender_reject yes
+
+ source $(local_domains) {
+ destination $(local_domains) {
+ deliver_to &local_routing
+ }
+ default_destination {
+ modify {
+ dkim $(primary_domain) $(local_domains) default
+ }
+ deliver_to &remote_queue
+ }
+ }
+ default_source {
+ reject 501 5.1.8 "Non-local sender domain"
+ }
+ }
+
+ target.smtp {{ .Values.domains.primary.name }} {
+ hostname $(hostname)
+ attempt_starttls false
+ require_tls no
+ auth off
+ targets tcp://maddy.{{ .Values.domains.primary.namespace }}.svc.cluster.local:25
+ }
+
+ {{ range .Values.domains.others }}
+ target.smtp {{ .name }} {
+ hostname mail.{{ .name }}
+ attempt_starttls false
+ require_tls no
+ auth off
+ targets tcp://maddy.{{ .namespace }}.svc.cluster.local:25
+ }
+ {{ end }}
+
+ target.queue remote_queue {
+ target &outbound_delivery
+
+ autogenerated_msg_domain $(primary_domain)
+ bounce {
+ destination postmaster $(local_domains) {
+ deliver_to &local_routing
+ }
+ default_destination {
+ reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
+ }
+ }
+ }
+
+ target.remote outbound_delivery {
+ limits {
+ # Up to 20 msgs/sec across max. 10 SMTP connections
+ # for each recipient domain.
+ destination rate 20 1s
+ destination concurrency 10
+ }
+ mx_auth {
+ dane
+ mtasts {
+ cache fs
+ fs_dir mtasts_cache/
+ }
+ local_policy {
+ min_tls_level encrypted
+ min_mx_level none
+ }
+ }
+ }
diff --git a/charts/mail-gateway/templates/maddy.yaml b/charts/mail-gateway/templates/maddy.yaml
new file mode 100644
index 0000000..d3469b7
--- /dev/null
+++ b/charts/mail-gateway/templates/maddy.yaml
@@ -0,0 +1,97 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: maddy
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: maddy
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: maddy
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
+ spec:
+ volumes:
+ - name: config
+ configMap:
+ name: maddy
+ - name: certs
+ secret:
+ secretName: cert-{{ .Values.domains.primary.mx }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ containers:
+ - name: maddy
+ image: giolekva/maddy-auth-smtp:v0.4.4
+ imagePullPolicy: Always
+ ports:
+ - name: imap
+ containerPort: 143
+ protocol: TCP
+ - name: imaps
+ containerPort: 993
+ protocol: TCP
+ - name: smtp
+ containerPort: 25
+ protocol: TCP
+ - name: smtps
+ containerPort: 465
+ protocol: TCP
+ - name: submission
+ containerPort: 587
+ protocol: TCP
+ command:
+ - maddy
+ - -config
+ - /etc/maddy/config/maddy.conf
+ volumeMounts:
+ - name: config
+ mountPath: /etc/maddy/config
+ - name: certs
+ mountPath: /etc/maddy/certs
+ - name: data
+ mountPath: /var/lib/maddy
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: maddy
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: LoadBalancer
+ externalTrafficPolicy: Local
+ selector:
+ app: maddy
+ ports:
+ - name: imap
+ port: 143
+ protocol: TCP
+ - name: imaps
+ port: 993
+ protocol: TCP
+ - name: smtp
+ port: 25
+ protocol: TCP
+ - name: smtps
+ port: 465
+ protocol: TCP
+ - name: submission
+ port: 587
+ protocol: TCP
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: data
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
diff --git a/charts/mail-gateway/templates/mx-certificate.yaml b/charts/mail-gateway/templates/mx-certificate.yaml
new file mode 100644
index 0000000..2999b08
--- /dev/null
+++ b/charts/mail-gateway/templates/mx-certificate.yaml
@@ -0,0 +1,14 @@
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: {{ .Values.domains.primary.mx }}
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/resource-policy: keep
+spec:
+ dnsNames:
+ - {{ .Values.domains.primary.mx }}
+ issuerRef:
+ name: {{ .Values.domains.primary.certificateIssuer }}
+ kind: ClusterIssuer
+ secretName: cert-{{ .Values.domains.primary.mx }}
diff --git a/charts/mail-gateway/values.yaml b/charts/mail-gateway/values.yaml
new file mode 100644
index 0000000..4810334
--- /dev/null
+++ b/charts/mail-gateway/values.yaml
@@ -0,0 +1,11 @@
+domains:
+ primary:
+ name: example.com
+ namespace: example-app-maddy
+ mx: mx1.example.com
+ certificateIssuer: public
+ others:
+ - name: other.com
+ namespace: other-app-maddy
+persistence:
+ size: 10Gi
diff --git a/charts/matrix/.helmignore b/charts/matrix/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/matrix/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/matrix/Chart.yaml b/charts/matrix/Chart.yaml
new file mode 100644
index 0000000..51cb744
--- /dev/null
+++ b/charts/matrix/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: matrix
+description: A Helm chart for Matrix on PCloud
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/matrix/templates/config-to-merge.yaml b/charts/matrix/templates/config-to-merge.yaml
new file mode 100644
index 0000000..eefe3e3
--- /dev/null
+++ b/charts/matrix/templates/config-to-merge.yaml
@@ -0,0 +1,44 @@
+apiVersion: dodo.cloud.dodo.cloud/v1
+kind: ResourceRenderer
+metadata:
+ name: config-renderer
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/hook: pre-install
+ helm.sh/hook-weight: "-10"
+spec:
+ secretName: {{ .Values.oauth2.secretName }}
+ resourceTemplate: |
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: {{ .Values.configMerge.configName }}
+ namespace: {{ .Release.Namespace }}
+ data:
+ {{ .Values.configMerge.fileName }}: |
+ public_baseurl: https://{{ .Values.subdomain }}.{{ .Values.domain }}/
+ enable_registration: false
+ database:
+ name: psycopg2
+ txn_limit: 10000
+ args:
+ host: {{ .Values.postgresql.host }}
+ port: {{ .Values.postgresql.port }}
+ database: {{ .Values.postgresql.database }}
+ user: {{ .Values.postgresql.user }}
+ password: {{ .Values.postgresql.password }}
+ cp_min: 5
+ cp_max: 10
+ oidc_providers:
+ - idp_id: pcloud
+ idp_name: "PCloud"
+ skip_verification: true
+ issuer: {{ .Values.oauth2.issuer }}
+ client_id: "{{`{{ .client_id }}`}}"
+ client_secret: "{{`{{ .client_secret }}`}}"
+ scopes: ["openid", "profile"]
+ allow_existing_users: true
+ user_mapping_provider:
+ config:
+ localpart_template: "{{ cat "{{" "\"" "{{user.username}}" "\"" "}}" | nospace}}"
+ display_name_template: "{{ cat "{{" "\"" "{{user.username}}" "\"" "}}" | nospace}}"
diff --git a/charts/matrix/templates/matrix.yaml b/charts/matrix/templates/matrix.yaml
new file mode 100644
index 0000000..2cbc59a
--- /dev/null
+++ b/charts/matrix/templates/matrix.yaml
@@ -0,0 +1,202 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: CreateConfigMaps
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/hook: pre-install
+ helm.sh/hook-weight: "-10"
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: default-CreateConfigMaps
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/hook: pre-install
+ helm.sh/hook-weight: "-10"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: CreateConfigMaps
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: matrix
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: matrix
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.certificateIssuer }}
+ annotations:
+ acme.cert-manager.io/http01-edit-in-place: "true"
+ cert-manager.io/cluster-issuer: {{ .Values.certificateIssuer }}
+ {{- end }}
+spec:
+ ingressClassName: {{ .Values.ingressClassName }}
+ tls:
+ - hosts:
+ - {{ .Values.subdomain }}.{{ .Values.domain }}
+ secretName: cert-{{ .Values.subdomain }}.{{ .Values.domain }}
+ rules:
+ - host: {{ .Values.subdomain }}.{{ .Values.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: matrix
+ port:
+ name: http
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: generate-config
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/hook: pre-install
+ helm.sh/hook-weight: "-5"
+spec:
+ template:
+ metadata:
+ labels:
+ app: generate-config
+ spec:
+ restartPolicy: OnFailure
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ - name: config
+ configMap:
+ name: {{ .Values.configMerge.configName }}
+ initContainers:
+ - name: matrix
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8008
+ protocol: TCP
+ env:
+ - name: SYNAPSE_SERVER_NAME
+ value: "{{ .Values.domain }}"
+ - name: SYNAPSE_REPORT_STATS
+ value: "no"
+ - name: SYNAPSE_CONFIG_DIR
+ value: "/data"
+ - name: SYNAPSE_CONFIG_PATH
+ value: "/data/homeserver.yaml"
+ - name: SYNAPSE_DATA_DIR
+ value: "/data"
+ command:
+ - /start.py
+ - generate
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ containers:
+ - name: capture-config
+ image: giolekva/capture-config:latest
+ imagePullPolicy: Always
+ command:
+ - /capture-config
+ - --base=/data/homeserver.yaml
+ - --merge-with=/config-to-merge/{{ .Values.configMerge.fileName }}
+ - --namespace={{ .Release.Namespace }}
+ - --config-map-name=config
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: config
+ mountPath: /config-to-merge
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: matrix
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: matrix
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: matrix
+ spec:
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ - name: homeserver-config
+ configMap:
+ name: config
+ containers:
+ - name: matrix
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8008
+ protocol: TCP
+ env:
+ - name: SYNAPSE_SERVER_NAME
+ value: "{{ .Values.domain }}"
+ - name: SYNAPSE_REPORT_STATS
+ value: "no"
+ - name: SYNAPSE_CONFIG_DIR
+ value: "/data"
+ - name: SYNAPSE_CONFIG_PATH
+ value: "/homeserver-config/homeserver.yaml"
+ - name: SYNAPSE_DATA_DIR
+ value: "/data"
+ command: ["/start.py"]
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: homeserver-config
+ mountPath: /homeserver-config
+ readOnly: true
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: data
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/hook: pre-install
+ helm.sh/hook-weight: "-10"
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 10Gi
diff --git a/charts/matrix/templates/well-known.yaml b/charts/matrix/templates/well-known.yaml
new file mode 100644
index 0000000..33e4b94
--- /dev/null
+++ b/charts/matrix/templates/well-known.yaml
@@ -0,0 +1,118 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: well-known
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: well-known
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: well-known
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.certificateIssuer }}
+ annotations:
+ acme.cert-manager.io/http01-edit-in-place: "true"
+ cert-manager.io/cluster-issuer: {{ .Values.certificateIssuer }}
+ {{- end }}
+spec:
+ ingressClassName: {{ .Values.ingressClassName }}
+ tls:
+ - hosts:
+ - {{ .Values.domain }}
+ secretName: cert-{{ .Values.domain }}
+ rules:
+ - host: {{ .Values.domain }}
+ http:
+ paths:
+ - path: /.well-known/matrix
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: well-known
+ port:
+ name: http
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: well-known
+ namespace: {{ .Release.Namespace }}
+data:
+ nginx.conf: |
+ # user www www;
+ worker_processes 1;
+ error_log /dev/null crit;
+ # pid logs/nginx.pid;
+ worker_rlimit_nofile 8192;
+ events {
+ worker_connections 1024;
+ }
+ http {
+ server {
+ listen 8080;
+ location /.well-known/matrix/client {
+ return 200 '{"m.homeserver": {"base_url": "https://{{ .Values.subdomain }}.{{ .Values.domain }}"}}';
+ default_type application/json;
+ add_header Access-Control-Allow-Origin *;
+ }
+ location /.well-known/matrix/server {
+ return 200 '{"m.server": "{{ .Values.subdomain }}.{{ .Values.domain }}:443"}';
+ default_type application/json;
+ add_header Access-Control-Allow-Origin *;
+ }
+ }
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: well-known
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: well-known
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: well-known
+ spec:
+ volumes:
+ - name: config
+ configMap:
+ name: well-known
+ containers:
+ - name: nginx
+ image: nginx:1.21.3-alpine
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ volumeMounts:
+ - name: config
+ mountPath: /etc/nginx
+ readOnly: true
+ resources:
+ requests:
+ memory: "10Mi"
+ cpu: "10m"
+ limits:
+ memory: "20Mi"
+ cpu: "100m"
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
diff --git a/charts/matrix/values.yaml b/charts/matrix/values.yaml
new file mode 100644
index 0000000..5d57ae0
--- /dev/null
+++ b/charts/matrix/values.yaml
@@ -0,0 +1,20 @@
+image:
+ repository: matrixdotorg/synapse
+ tag: v1.98.0
+ pullPolicy: IfNotPresent
+domain: example.com
+subdomain: matrix
+oauth2:
+ issuer: https://oidc-issuer.example.com
+ secretName: oauth2-client
+postgresql:
+ host: postgresql
+ port: 5432
+ database: synapse
+ user: synapse_user
+ password: password
+certificateIssuer: public
+ingressClassName: nginx
+configMerge:
+ configName: config-to-merge
+ fileName: to-merge.yaml
diff --git a/charts/memberships/.helmignore b/charts/memberships/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/memberships/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/memberships/Chart.yaml b/charts/memberships/Chart.yaml
new file mode 100644
index 0000000..dd793d4
--- /dev/null
+++ b/charts/memberships/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: memberships
+description: A Helm chart for Memberships application
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/memberships/templates/install.yaml b/charts/memberships/templates/install.yaml
new file mode 100644
index 0000000..1d0034b
--- /dev/null
+++ b/charts/memberships/templates/install.yaml
@@ -0,0 +1,67 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: memberships
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: memberships
+ ports:
+ - name: {{ .Values.portName }}
+ protocol: TCP
+ port: 80
+ targetPort: ui
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: memberships-api
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: memberships
+ ports:
+ - name: {{ .Values.portName }}
+ protocol: TCP
+ port: 80
+ targetPort: api
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: memberships
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: memberships
+ template:
+ metadata:
+ labels:
+ app: memberships
+ spec:
+ containers:
+ - name: memberships
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ ports:
+ - name: ui
+ containerPort: 8080
+ protocol: TCP
+ - name: api
+ containerPort: 8081
+ protocol: TCP
+ command:
+ - /usr/bin/memberships
+ - --port=8080
+ - --api-port=8081
+ - --db-path=/data/memberships.db
+ volumeMounts:
+ - name: memberships
+ mountPath: /data
+ volumes:
+ - name: memberships
+ persistentVolumeClaim:
+ claimName: memberships
diff --git a/charts/memberships/templates/volume.yaml b/charts/memberships/templates/volume.yaml
new file mode 100644
index 0000000..48bbcb8
--- /dev/null
+++ b/charts/memberships/templates/volume.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: memberships
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.size }}
diff --git a/charts/memberships/values.yaml b/charts/memberships/values.yaml
new file mode 100644
index 0000000..c4be0f8
--- /dev/null
+++ b/charts/memberships/values.yaml
@@ -0,0 +1,7 @@
+image:
+ repository: giolekva/memberships
+ tag: latest
+ pullPolicy: Always
+storage:
+ size: 1Gi
+portName: http
diff --git a/charts/metallb-0.13.7/.helmignore b/charts/metallb-0.13.7/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/metallb-0.13.7/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/metallb-0.13.7/Chart.yaml b/charts/metallb-0.13.7/Chart.yaml
new file mode 100644
index 0000000..76e774d
--- /dev/null
+++ b/charts/metallb-0.13.7/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v2
+appVersion: v0.13.7
+description: A network load-balancer implementation for Kubernetes using standard
+ routing protocols
+home: https://metallb.universe.tf
+icon: https://metallb.universe.tf/images/logo/metallb-white.png
+name: metallb
+sources:
+- https://github.com/metallb/metallb
+type: application
+version: 0.13.7
diff --git a/charts/metallb-0.13.7/README.md b/charts/metallb-0.13.7/README.md
new file mode 100644
index 0000000..25cb5d4
--- /dev/null
+++ b/charts/metallb-0.13.7/README.md
@@ -0,0 +1,148 @@
+# metallb
+
+  
+
+A network load-balancer implementation for Kubernetes using standard routing protocols
+
+**Homepage:** <https://metallb.universe.tf>
+
+## Source Code
+
+* <https://github.com/metallb/metallb>
+
+## Requirements
+
+| Repository | Name | Version |
+|------------|------|---------|
+| | crds | 0.0.0 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| controller.affinity | object | `{}` | |
+| controller.enabled | bool | `true` | |
+| controller.image.pullPolicy | string | `nil` | |
+| controller.image.repository | string | `"quay.io/metallb/controller"` | |
+| controller.image.tag | string | `nil` | |
+| controller.livenessProbe.enabled | bool | `true` | |
+| controller.livenessProbe.failureThreshold | int | `3` | |
+| controller.livenessProbe.initialDelaySeconds | int | `10` | |
+| controller.livenessProbe.periodSeconds | int | `10` | |
+| controller.livenessProbe.successThreshold | int | `1` | |
+| controller.livenessProbe.timeoutSeconds | int | `1` | |
+| controller.logLevel | string | `"info"` | Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` |
+| controller.nodeSelector | object | `{}` | |
+| controller.podAnnotations | object | `{}` | |
+| controller.priorityClassName | string | `""` | |
+| controller.readinessProbe.enabled | bool | `true` | |
+| controller.readinessProbe.failureThreshold | int | `3` | |
+| controller.readinessProbe.initialDelaySeconds | int | `10` | |
+| controller.readinessProbe.periodSeconds | int | `10` | |
+| controller.readinessProbe.successThreshold | int | `1` | |
+| controller.readinessProbe.timeoutSeconds | int | `1` | |
+| controller.resources | object | `{}` | |
+| controller.runtimeClassName | string | `""` | |
+| controller.securityContext.fsGroup | int | `65534` | |
+| controller.securityContext.runAsNonRoot | bool | `true` | |
+| controller.securityContext.runAsUser | int | `65534` | |
+| controller.serviceAccount.annotations | object | `{}` | |
+| controller.serviceAccount.create | bool | `true` | |
+| controller.serviceAccount.name | string | `""` | |
+| controller.strategy.type | string | `"RollingUpdate"` | |
+| controller.tolerations | list | `[]` | |
+| crds.enabled | bool | `true` | |
+| crds.validationFailurePolicy | string | `"Fail"` | |
+| fullnameOverride | string | `""` | |
+| imagePullSecrets | list | `[]` | |
+| loadBalancerClass | string | `""` | |
+| nameOverride | string | `""` | |
+| prometheus.controllerMetricsTLSSecret | string | `""` | |
+| prometheus.metricsPort | int | `7472` | |
+| prometheus.namespace | string | `""` | |
+| prometheus.podMonitor.additionalLabels | object | `{}` | |
+| prometheus.podMonitor.annotations | object | `{}` | |
+| prometheus.podMonitor.enabled | bool | `false` | |
+| prometheus.podMonitor.interval | string | `nil` | |
+| prometheus.podMonitor.jobLabel | string | `"app.kubernetes.io/name"` | |
+| prometheus.podMonitor.metricRelabelings | list | `[]` | |
+| prometheus.podMonitor.relabelings | list | `[]` | |
+| prometheus.prometheusRule.additionalLabels | object | `{}` | |
+| prometheus.prometheusRule.addressPoolExhausted.enabled | bool | `true` | |
+| prometheus.prometheusRule.addressPoolExhausted.labels.severity | string | `"alert"` | |
+| prometheus.prometheusRule.addressPoolUsage.enabled | bool | `true` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[0].labels.severity | string | `"warning"` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[0].percent | int | `75` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[1].labels.severity | string | `"warning"` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[1].percent | int | `85` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[2].labels.severity | string | `"alert"` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[2].percent | int | `95` | |
+| prometheus.prometheusRule.annotations | object | `{}` | |
+| prometheus.prometheusRule.bgpSessionDown.enabled | bool | `true` | |
+| prometheus.prometheusRule.bgpSessionDown.labels.severity | string | `"alert"` | |
+| prometheus.prometheusRule.configNotLoaded.enabled | bool | `true` | |
+| prometheus.prometheusRule.configNotLoaded.labels.severity | string | `"warning"` | |
+| prometheus.prometheusRule.enabled | bool | `false` | |
+| prometheus.prometheusRule.extraAlerts | list | `[]` | |
+| prometheus.prometheusRule.staleConfig.enabled | bool | `true` | |
+| prometheus.prometheusRule.staleConfig.labels.severity | string | `"warning"` | |
+| prometheus.rbacPrometheus | bool | `true` | |
+| prometheus.rbacProxy.repository | string | `"gcr.io/kubebuilder/kube-rbac-proxy"` | |
+| prometheus.rbacProxy.tag | string | `"v0.12.0"` | |
+| prometheus.scrapeAnnotations | bool | `false` | |
+| prometheus.serviceAccount | string | `""` | |
+| prometheus.serviceMonitor.controller.additionalLabels | object | `{}` | |
+| prometheus.serviceMonitor.controller.annotations | object | `{}` | |
+| prometheus.serviceMonitor.controller.tlsConfig.insecureSkipVerify | bool | `true` | |
+| prometheus.serviceMonitor.enabled | bool | `false` | |
+| prometheus.serviceMonitor.interval | string | `nil` | |
+| prometheus.serviceMonitor.jobLabel | string | `"app.kubernetes.io/name"` | |
+| prometheus.serviceMonitor.metricRelabelings | list | `[]` | |
+| prometheus.serviceMonitor.relabelings | list | `[]` | |
+| prometheus.serviceMonitor.speaker.additionalLabels | object | `{}` | |
+| prometheus.serviceMonitor.speaker.annotations | object | `{}` | |
+| prometheus.serviceMonitor.speaker.tlsConfig.insecureSkipVerify | bool | `true` | |
+| prometheus.speakerMetricsTLSSecret | string | `""` | |
+| rbac.create | bool | `true` | |
+| speaker.affinity | object | `{}` | |
+| speaker.enabled | bool | `true` | |
+| speaker.frr.enabled | bool | `false` | |
+| speaker.frr.image.pullPolicy | string | `nil` | |
+| speaker.frr.image.repository | string | `"frrouting/frr"` | |
+| speaker.frr.image.tag | string | `"v7.5.1"` | |
+| speaker.frr.metricsPort | int | `7473` | |
+| speaker.frr.resources | object | `{}` | |
+| speaker.frrMetrics.resources | object | `{}` | |
+| speaker.image.pullPolicy | string | `nil` | |
+| speaker.image.repository | string | `"quay.io/metallb/speaker"` | |
+| speaker.image.tag | string | `nil` | |
+| speaker.livenessProbe.enabled | bool | `true` | |
+| speaker.livenessProbe.failureThreshold | int | `3` | |
+| speaker.livenessProbe.initialDelaySeconds | int | `10` | |
+| speaker.livenessProbe.periodSeconds | int | `10` | |
+| speaker.livenessProbe.successThreshold | int | `1` | |
+| speaker.livenessProbe.timeoutSeconds | int | `1` | |
+| speaker.logLevel | string | `"info"` | Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` |
+| speaker.memberlist.enabled | bool | `true` | |
+| speaker.memberlist.mlBindPort | int | `7946` | |
+| speaker.nodeSelector | object | `{}` | |
+| speaker.podAnnotations | object | `{}` | |
+| speaker.priorityClassName | string | `""` | |
+| speaker.readinessProbe.enabled | bool | `true` | |
+| speaker.readinessProbe.failureThreshold | int | `3` | |
+| speaker.readinessProbe.initialDelaySeconds | int | `10` | |
+| speaker.readinessProbe.periodSeconds | int | `10` | |
+| speaker.readinessProbe.successThreshold | int | `1` | |
+| speaker.readinessProbe.timeoutSeconds | int | `1` | |
+| speaker.reloader.resources | object | `{}` | |
+| speaker.resources | object | `{}` | |
+| speaker.runtimeClassName | string | `""` | |
+| speaker.serviceAccount.annotations | object | `{}` | |
+| speaker.serviceAccount.create | bool | `true` | |
+| speaker.serviceAccount.name | string | `""` | |
+| speaker.tolerateMaster | bool | `true` | |
+| speaker.tolerations | list | `[]` | |
+| speaker.updateStrategy.type | string | `"RollingUpdate"` | |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0)
diff --git a/charts/metallb-0.13.7/templates/manifest.yaml b/charts/metallb-0.13.7/templates/manifest.yaml
new file mode 100644
index 0000000..537ab7f
--- /dev/null
+++ b/charts/metallb-0.13.7/templates/manifest.yaml
@@ -0,0 +1,2033 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.11.1
+ creationTimestamp: null
+ name: addresspools.metallb.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRt
b1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /convert
+ conversionReviewVersions:
+ - v1alpha1
+ - v1beta1
+ group: metallb.io
+ names:
+ kind: AddressPool
+ listKind: AddressPoolList
+ plural: addresspools
+ singular: addresspool
+ scope: Namespaced
+ versions:
+ - deprecated: true
+ deprecationWarning: metallb.io v1alpha1 AddressPool is deprecated
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: AddressPool is the Schema for the addresspools API.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AddressPoolSpec defines the desired state of AddressPool.
+ properties:
+ addresses:
+ description: A list of IP address ranges over which MetalLB has authority.
+ You can list multiple ranges in a single pool, they will all share
+ the same settings. Each range can be either a CIDR prefix, or an
+ explicit start-end range of IPs.
+ items:
+ type: string
+ type: array
+ autoAssign:
+ default: true
+ description: AutoAssign flag used to prevent MetallB from automatic
+ allocation for a pool.
+ type: boolean
+ bgpAdvertisements:
+ description: When an IP is allocated from this pool, how should it
+ be translated into BGP announcements?
+ items:
+ properties:
+ aggregationLength:
+ default: 32
+ description: The aggregation-length advertisement option lets
+ you “roll up” the /32s into a larger prefix.
+ format: int32
+ minimum: 1
+ type: integer
+ aggregationLengthV6:
+ default: 128
+ description: Optional, defaults to 128 (i.e. no aggregation)
+ if not specified.
+ format: int32
+ type: integer
+ communities:
+ description: BGP communities
+ items:
+ type: string
+ type: array
+ localPref:
+ description: BGP LOCAL_PREF attribute which is used by BGP best
+ path algorithm, Path with higher localpref is preferred over
+ one with lower localpref.
+ format: int32
+ type: integer
+ type: object
+ type: array
+ protocol:
+ description: Protocol can be used to select how the announcement is
+ done.
+ enum:
+ - layer2
+ - bgp
+ type: string
+ required:
+ - addresses
+ - protocol
+ type: object
+ status:
+ description: AddressPoolStatus defines the observed state of AddressPool.
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - deprecated: true
+ deprecationWarning: metallb.io v1beta1 AddressPool is deprecated, consider using
+ IPAddressPool
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AddressPool represents a pool of IP addresses that can be allocated
+ to LoadBalancer services. AddressPool is deprecated and being replaced by
+ IPAddressPool.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AddressPoolSpec defines the desired state of AddressPool.
+ properties:
+ addresses:
+ description: A list of IP address ranges over which MetalLB has authority.
+ You can list multiple ranges in a single pool, they will all share
+ the same settings. Each range can be either a CIDR prefix, or an
+ explicit start-end range of IPs.
+ items:
+ type: string
+ type: array
+ autoAssign:
+ default: true
+ description: AutoAssign flag used to prevent MetallB from automatic
+ allocation for a pool.
+ type: boolean
+ bgpAdvertisements:
+ description: Drives how an IP allocated from this pool should translated
+ into BGP announcements.
+ items:
+ properties:
+ aggregationLength:
+ default: 32
+ description: The aggregation-length advertisement option lets
+ you “roll up” the /32s into a larger prefix.
+ format: int32
+ minimum: 1
+ type: integer
+ aggregationLengthV6:
+ default: 128
+ description: Optional, defaults to 128 (i.e. no aggregation)
+ if not specified.
+ format: int32
+ type: integer
+ communities:
+ description: BGP communities to be associated with the given
+ advertisement.
+ items:
+ type: string
+ type: array
+ localPref:
+ description: BGP LOCAL_PREF attribute which is used by BGP best
+ path algorithm, Path with higher localpref is preferred over
+ one with lower localpref.
+ format: int32
+ type: integer
+ type: object
+ type: array
+ protocol:
+ description: Protocol can be used to select how the announcement is
+ done.
+ enum:
+ - layer2
+ - bgp
+ type: string
+ required:
+ - addresses
+ - protocol
+ type: object
+ status:
+ description: AddressPoolStatus defines the observed state of AddressPool.
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.11.1
+ creationTimestamp: null
+ name: bfdprofiles.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: BFDProfile
+ listKind: BFDProfileList
+ plural: bfdprofiles
+ singular: bfdprofile
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.passiveMode
+ name: Passive Mode
+ type: boolean
+ - jsonPath: .spec.transmitInterval
+ name: Transmit Interval
+ type: integer
+ - jsonPath: .spec.receiveInterval
+ name: Receive Interval
+ type: integer
+ - jsonPath: .spec.detectMultiplier
+ name: Multiplier
+ type: integer
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BFDProfile represents the settings of the bfd session that can
+ be optionally associated with a BGP session.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BFDProfileSpec defines the desired state of BFDProfile.
+ properties:
+ detectMultiplier:
+ description: Configures the detection multiplier to determine packet
+ loss. The remote transmission interval will be multiplied by this
+ value to determine the connection loss detection timer.
+ format: int32
+ maximum: 255
+ minimum: 2
+ type: integer
+ echoInterval:
+ description: Configures the minimal echo receive transmission interval
+ that this system is capable of handling in milliseconds. Defaults
+ to 50ms
+ format: int32
+ maximum: 60000
+ minimum: 10
+ type: integer
+ echoMode:
+ description: Enables or disables the echo transmission mode. This
+ mode is disabled by default, and not supported on multi hops setups.
+ type: boolean
+ minimumTtl:
+ description: 'For multi hop sessions only: configure the minimum expected
+ TTL for an incoming BFD control packet.'
+ format: int32
+ maximum: 254
+ minimum: 1
+ type: integer
+ passiveMode:
+ description: 'Mark session as passive: a passive session will not
+ attempt to start the connection and will wait for control packets
+ from peer before it begins replying.'
+ type: boolean
+ receiveInterval:
+ description: The minimum interval that this system is capable of receiving
+ control packets in milliseconds. Defaults to 300ms.
+ format: int32
+ maximum: 60000
+ minimum: 10
+ type: integer
+ transmitInterval:
+ description: The minimum transmission interval (less jitter) that
+ this system wants to use to send BFD control packets in milliseconds.
+ Defaults to 300ms
+ format: int32
+ maximum: 60000
+ minimum: 10
+ type: integer
+ type: object
+ status:
+ description: BFDProfileStatus defines the observed state of BFDProfile.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.11.1
+ creationTimestamp: null
+ name: bgpadvertisements.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: BGPAdvertisement
+ listKind: BGPAdvertisementList
+ plural: bgpadvertisements
+ singular: bgpadvertisement
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.ipAddressPools
+ name: IPAddressPools
+ type: string
+ - jsonPath: .spec.ipAddressPoolSelectors
+ name: IPAddressPool Selectors
+ type: string
+ - jsonPath: .spec.peers
+ name: Peers
+ type: string
+ - jsonPath: .spec.nodeSelectors
+ name: Node Selectors
+ priority: 10
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BGPAdvertisement allows to advertise the IPs coming from the
+ selected IPAddressPools via BGP, setting the parameters of the BGP Advertisement.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPAdvertisementSpec defines the desired state of BGPAdvertisement.
+ properties:
+ aggregationLength:
+ default: 32
+ description: The aggregation-length advertisement option lets you
+ “roll up” the /32s into a larger prefix. Defaults to 32. Works for
+ IPv4 addresses.
+ format: int32
+ minimum: 1
+ type: integer
+ aggregationLengthV6:
+ default: 128
+ description: The aggregation-length advertisement option lets you
+ “roll up” the /128s into a larger prefix. Defaults to 128. Works
+ for IPv6 addresses.
+ format: int32
+ type: integer
+ communities:
+ description: The BGP communities to be associated with the announcement.
+ Each item can be a standard community of the form 1234:1234, a large
+ community of the form large:1234:1234:1234 or the name of an alias
+ defined in the Community CRD.
+ items:
+ type: string
+ type: array
+ ipAddressPoolSelectors:
+ description: A selector for the IPAddressPools which would get advertised
+ via this advertisement. If no IPAddressPool is selected by this
+ or by the list, the advertisement is applied to all the IPAddressPools.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ ipAddressPools:
+ description: The list of IPAddressPools to advertise via this advertisement,
+ selected by name.
+ items:
+ type: string
+ type: array
+ localPref:
+ description: The BGP LOCAL_PREF attribute which is used by BGP best
+ path algorithm, Path with higher localpref is preferred over one
+ with lower localpref.
+ format: int32
+ type: integer
+ nodeSelectors:
+ description: NodeSelectors allows to limit the nodes to announce as
+ next hops for the LoadBalancer IP. When empty, all the nodes having are
+ announced as next hops.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ peers:
+ description: Peers limits the bgppeer to advertise the ips of the
+ selected pools to. When empty, the loadbalancer IP is announced
+ to all the BGPPeers configured.
+ items:
+ type: string
+ type: array
+ type: object
+ status:
+ description: BGPAdvertisementStatus defines the observed state of BGPAdvertisement.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.11.1
+ creationTimestamp: null
+ name: bgppeers.metallb.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRt
b1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /convert
+ conversionReviewVersions:
+ - v1beta1
+ - v1beta2
+ group: metallb.io
+ names:
+ kind: BGPPeer
+ listKind: BGPPeerList
+ plural: bgppeers
+ singular: bgppeer
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.peerAddress
+ name: Address
+ type: string
+ - jsonPath: .spec.peerASN
+ name: ASN
+ type: string
+ - jsonPath: .spec.bfdProfile
+ name: BFD Profile
+ type: string
+ - jsonPath: .spec.ebgpMultiHop
+ name: Multi Hops
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BGPPeer is the Schema for the peers API.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPPeerSpec defines the desired state of Peer.
+ properties:
+ bfdProfile:
+ type: string
+ ebgpMultiHop:
+ description: EBGP peer is multi-hops away
+ type: boolean
+ holdTime:
+ description: Requested BGP hold time, per RFC4271.
+ type: string
+ keepaliveTime:
+ description: Requested BGP keepalive time, per RFC4271.
+ type: string
+ myASN:
+ description: AS number to use for the local end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ nodeSelectors:
+ description: Only connect to this peer on nodes that match one of
+ these selectors.
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ minItems: 1
+ type: array
+ required:
+ - key
+ - operator
+ - values
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: array
+ password:
+ description: Authentication password for routers enforcing TCP MD5
+ authenticated sessions
+ type: string
+ peerASN:
+ description: AS number to expect from the remote end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ peerAddress:
+ description: Address to dial when establishing the session.
+ type: string
+ peerPort:
+ description: Port to dial when establishing the session.
+ maximum: 16384
+ minimum: 0
+ type: integer
+ routerID:
+ description: BGP router ID to advertise to the peer
+ type: string
+ sourceAddress:
+ description: Source address to use when establishing the session.
+ type: string
+ required:
+ - myASN
+ - peerASN
+ - peerAddress
+ type: object
+ status:
+ description: BGPPeerStatus defines the observed state of Peer.
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - jsonPath: .spec.peerAddress
+ name: Address
+ type: string
+ - jsonPath: .spec.peerASN
+ name: ASN
+ type: string
+ - jsonPath: .spec.bfdProfile
+ name: BFD Profile
+ type: string
+ - jsonPath: .spec.ebgpMultiHop
+ name: Multi Hops
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BGPPeer is the Schema for the peers API.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPPeerSpec defines the desired state of Peer.
+ properties:
+ bfdProfile:
+ description: The name of the BFD Profile to be used for the BFD session
+ associated to the BGP session. If not set, the BFD session won't
+ be set up.
+ type: string
+ ebgpMultiHop:
+ description: To set if the BGPPeer is multi-hops away. Needed for
+ FRR mode only.
+ type: boolean
+ holdTime:
+ description: Requested BGP hold time, per RFC4271.
+ type: string
+ keepaliveTime:
+ description: Requested BGP keepalive time, per RFC4271.
+ type: string
+ myASN:
+ description: AS number to use for the local end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ nodeSelectors:
+ description: Only connect to this peer on nodes that match one of
+ these selectors.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ password:
+ description: Authentication password for routers enforcing TCP MD5
+ authenticated sessions
+ type: string
+ passwordSecret:
+ description: passwordSecret is name of the authentication secret for
+ BGP Peer. the secret must be of type "kubernetes.io/basic-auth",
+ and created in the same namespace as the MetalLB deployment. The
+ password is stored in the secret as the key "password".
+ properties:
+ name:
+ description: name is unique within a namespace to reference a
+ secret resource.
+ type: string
+ namespace:
+ description: namespace defines the space within which the secret
+ name must be unique.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ peerASN:
+ description: AS number to expect from the remote end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ peerAddress:
+ description: Address to dial when establishing the session.
+ type: string
+ peerPort:
+ default: 179
+ description: Port to dial when establishing the session.
+ maximum: 16384
+ minimum: 0
+ type: integer
+ routerID:
+ description: BGP router ID to advertise to the peer
+ type: string
+ sourceAddress:
+ description: Source address to use when establishing the session.
+ type: string
+ vrf:
+ description: To set if we want to peer with the BGPPeer using an interface
+ belonging to a host vrf
+ type: string
+ required:
+ - myASN
+ - peerASN
+ - peerAddress
+ type: object
+ status:
+ description: BGPPeerStatus defines the observed state of Peer.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.11.1
+ creationTimestamp: null
+ name: communities.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: Community
+ listKind: CommunityList
+ plural: communities
+ singular: community
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Community is a collection of aliases for communities. Users can
+ define named aliases to be used in the BGPPeer CRD.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CommunitySpec defines the desired state of Community.
+ properties:
+ communities:
+ items:
+ properties:
+ name:
+ description: The name of the alias for the community.
+ type: string
+ value:
+ description: The BGP community value corresponding to the given
+ name. Can be a standard community of the form 1234:1234 or
+ a large community of the form large:1234:1234:1234.
+ type: string
+ type: object
+ type: array
+ type: object
+ status:
+ description: CommunityStatus defines the observed state of Community.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.11.1
+ creationTimestamp: null
+ name: ipaddresspools.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: IPAddressPool
+ listKind: IPAddressPoolList
+ plural: ipaddresspools
+ singular: ipaddresspool
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.autoAssign
+ name: Auto Assign
+ type: boolean
+ - jsonPath: .spec.avoidBuggyIPs
+ name: Avoid Buggy IPs
+ type: boolean
+ - jsonPath: .spec.addresses
+ name: Addresses
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: IPAddressPool represents a pool of IP addresses that can be allocated
+ to LoadBalancer services.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAddressPoolSpec defines the desired state of IPAddressPool.
+ properties:
+ addresses:
+ description: A list of IP address ranges over which MetalLB has authority.
+ You can list multiple ranges in a single pool, they will all share
+ the same settings. Each range can be either a CIDR prefix, or an
+ explicit start-end range of IPs.
+ items:
+ type: string
+ type: array
+ autoAssign:
+ default: true
+              description: AutoAssign flag used to prevent MetalLB from automatic
+ allocation for a pool.
+ type: boolean
+ avoidBuggyIPs:
+ default: false
+ description: AvoidBuggyIPs prevents addresses ending with .0 and .255
+ to be used by a pool.
+ type: boolean
+ serviceAllocation:
+ description: AllocateTo makes ip pool allocation to specific namespace
+ and/or service. The controller will use the pool with lowest value
+ of priority in case of multiple matches. A pool with no priority
+ set will be used only if the pools with priority can't be used.
+ If multiple matching IPAddressPools are available it will check
+ for the availability of IPs sorting the matching IPAddressPools
+ by priority, starting from the highest to the lowest. If multiple
+ IPAddressPools have the same priority, choice will be random.
+ properties:
+ namespaceSelectors:
+ description: NamespaceSelectors list of label selectors to select
+ namespace(s) for ip pool, an alternative to using namespace
+ list.
+ items:
+ description: A label selector is a label query over a set of
+ resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects. A
+ null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ namespaces:
+ description: Namespaces list of namespace(s) on which ip pool
+ can be attached.
+ items:
+ type: string
+ type: array
+ priority:
+ description: Priority priority given for ip pool while ip allocation
+ on a service.
+ type: integer
+ serviceSelectors:
+ description: ServiceSelectors list of label selector to select
+ service(s) for which ip pool can be used for ip allocation.
+ items:
+ description: A label selector is a label query over a set of
+ resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects. A
+ null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ type: object
+ required:
+ - addresses
+ type: object
+ status:
+ description: IPAddressPoolStatus defines the observed state of IPAddressPool.
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.11.1
+ creationTimestamp: null
+ name: l2advertisements.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: L2Advertisement
+ listKind: L2AdvertisementList
+ plural: l2advertisements
+ singular: l2advertisement
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.ipAddressPools
+ name: IPAddressPools
+ type: string
+ - jsonPath: .spec.ipAddressPoolSelectors
+ name: IPAddressPool Selectors
+ type: string
+ - jsonPath: .spec.interfaces
+ name: Interfaces
+ type: string
+ - jsonPath: .spec.nodeSelectors
+ name: Node Selectors
+ priority: 10
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: L2Advertisement allows to advertise the LoadBalancer IPs provided
+ by the selected pools via L2.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: L2AdvertisementSpec defines the desired state of L2Advertisement.
+ properties:
+ interfaces:
+ description: A list of interfaces to announce from. The LB IP will
+ be announced only from these interfaces. If the field is not set,
+ we advertise from all the interfaces on the host.
+ items:
+ type: string
+ type: array
+ ipAddressPoolSelectors:
+ description: A selector for the IPAddressPools which would get advertised
+ via this advertisement. If no IPAddressPool is selected by this
+ or by the list, the advertisement is applied to all the IPAddressPools.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ ipAddressPools:
+ description: The list of IPAddressPools to advertise via this advertisement,
+ selected by name.
+ items:
+ type: string
+ type: array
+ nodeSelectors:
+        description: NodeSelectors allows to limit the nodes to announce as
+          next hops for the LoadBalancer IP. When empty, all the nodes are
+          announced as next hops.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ type: object
+ status:
+ description: L2AdvertisementStatus defines the observed state of L2Advertisement.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app: metallb
+ name: controller
+ namespace: metallb-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app: metallb
+ name: speaker
+ namespace: metallb-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app: metallb
+ name: controller
+ namespace: metallb-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resourceNames:
+ - memberlist
+ resources:
+ - secrets
+ verbs:
+ - list
+- apiGroups:
+ - apps
+ resourceNames:
+ - controller
+ resources:
+ - deployments
+ verbs:
+ - get
+- apiGroups:
+ - metallb.io
+ resources:
+ - bgppeers
+ verbs:
+ - get
+ - list
+- apiGroups:
+ - metallb.io
+ resources:
+ - addresspools
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - bfdprofiles
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - ipaddresspools
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - bgpadvertisements
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - l2advertisements
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - communities
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app: metallb
+ name: pod-lister
+ namespace: metallb-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - list
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - addresspools
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - bfdprofiles
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - bgppeers
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - l2advertisements
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - bgpadvertisements
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - ipaddresspools
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - metallb.io
+ resources:
+ - communities
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: metallb
+ name: metallb-system:controller
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - services
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - list
+- apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - policy
+ resourceNames:
+ - controller
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+- apiGroups:
+ - admissionregistration.k8s.io
+ resourceNames:
+ - metallb-webhook-configuration
+ resources:
+ - validatingwebhookconfigurations
+ - mutatingwebhookconfigurations
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ - mutatingwebhookconfigurations
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - apiextensions.k8s.io
+ resourceNames:
+ - addresspools.metallb.io
+ - bfdprofiles.metallb.io
+ - bgpadvertisements.metallb.io
+ - bgppeers.metallb.io
+ - ipaddresspools.metallb.io
+ - l2advertisements.metallb.io
+ - communities.metallb.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: metallb
+ name: metallb-system:speaker
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - services
+ - endpoints
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - policy
+ resourceNames:
+ - speaker
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app: metallb
+ name: controller
+ namespace: metallb-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: controller
+subjects:
+- kind: ServiceAccount
+ name: controller
+ namespace: metallb-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app: metallb
+ name: pod-lister
+ namespace: metallb-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: pod-lister
+subjects:
+- kind: ServiceAccount
+ name: speaker
+ namespace: metallb-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app: metallb
+ name: metallb-system:controller
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: metallb-system:controller
+subjects:
+- kind: ServiceAccount
+ name: controller
+ namespace: metallb-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app: metallb
+ name: metallb-system:speaker
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: metallb-system:speaker
+subjects:
+- kind: ServiceAccount
+ name: speaker
+ namespace: metallb-system
+---
+apiVersion: v1
+data:
+ excludel2.yaml: |
+ announcedInterfacesToExclude: ["docker.*", "cbr.*", "dummy.*", "virbr.*", "lxcbr.*", "veth.*", "lo", "^cali.*", "^tunl.*", "flannel.*", "kube-ipvs.*", "cni.*", "^nodelocaldns.*"]
+kind: ConfigMap
+metadata:
+ name: metallb-excludel2
+ namespace: metallb-system
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: webhook-server-cert
+ namespace: metallb-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: webhook-service
+ namespace: metallb-system
+spec:
+ ports:
+ - port: 443
+ targetPort: 9443
+ selector:
+ component: controller
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: metallb
+ component: controller
+ name: controller
+ namespace: metallb-system
+spec:
+ revisionHistoryLimit: 3
+ selector:
+ matchLabels:
+ app: metallb
+ component: controller
+ template:
+ metadata:
+ annotations:
+ prometheus.io/port: "7472"
+ prometheus.io/scrape: "true"
+ labels:
+ app: metallb
+ component: controller
+ spec:
+ containers:
+ - args:
+ - --port=7472
+ - --log-level=info
+ env:
+ - name: METALLB_ML_SECRET_NAME
+ value: memberlist
+ - name: METALLB_DEPLOYMENT
+ value: controller
+ image: quay.io/metallb/controller:v0.13.10
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ name: controller
+ ports:
+ - containerPort: 7472
+ name: monitoring
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+            - ALL
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ nodeSelector:
+ kubernetes.io/os: linux
+ securityContext:
+ fsGroup: 65534
+ runAsNonRoot: true
+ runAsUser: 65534
+ serviceAccountName: controller
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: webhook-server-cert
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ app: metallb
+ component: speaker
+ name: speaker
+ namespace: metallb-system
+spec:
+ selector:
+ matchLabels:
+ app: metallb
+ component: speaker
+ template:
+ metadata:
+ annotations:
+ prometheus.io/port: "7472"
+ prometheus.io/scrape: "true"
+ labels:
+ app: metallb
+ component: speaker
+ spec:
+ containers:
+ - args:
+ - --port=7472
+ - --log-level=info
+ env:
+ - name: METALLB_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: METALLB_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: METALLB_ML_BIND_ADDR
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: METALLB_ML_LABELS
+ value: app=metallb,component=speaker
+ - name: METALLB_ML_SECRET_KEY_PATH
+ value: /etc/ml_secret_key
+ image: quay.io/metallb/speaker:v0.13.10
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ name: speaker
+ ports:
+ - containerPort: 7472
+ name: monitoring
+ - containerPort: 7946
+ name: memberlist-tcp
+ - containerPort: 7946
+ name: memberlist-udp
+ protocol: UDP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ add:
+ - NET_RAW
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /etc/ml_secret_key
+ name: memberlist
+ readOnly: true
+ - mountPath: /etc/metallb
+ name: metallb-excludel2
+ readOnly: true
+ hostNetwork: true
+ nodeSelector:
+ kubernetes.io/os: linux
+ serviceAccountName: speaker
+ terminationGracePeriodSeconds: 2
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ volumes:
+ - name: memberlist
+ secret:
+ defaultMode: 420
+ secretName: memberlist
+ - configMap:
+ defaultMode: 256
+ name: metallb-excludel2
+ name: metallb-excludel2
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ creationTimestamp: null
+ name: metallb-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /validate-metallb-io-v1beta2-bgppeer
+ failurePolicy: Fail
+ name: bgppeersvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - bgppeers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /validate-metallb-io-v1beta1-addresspool
+ failurePolicy: Fail
+ name: addresspoolvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - addresspools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /validate-metallb-io-v1beta1-bfdprofile
+ failurePolicy: Fail
+ name: bfdprofilevalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - DELETE
+ resources:
+ - bfdprofiles
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /validate-metallb-io-v1beta1-bgpadvertisement
+ failurePolicy: Fail
+ name: bgpadvertisementvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - bgpadvertisements
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /validate-metallb-io-v1beta1-community
+ failurePolicy: Fail
+ name: communityvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - communities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /validate-metallb-io-v1beta1-ipaddresspool
+ failurePolicy: Fail
+ name: ipaddresspoolvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - ipaddresspools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: metallb-system
+ path: /validate-metallb-io-v1beta1-l2advertisement
+ failurePolicy: Fail
+ name: l2advertisementvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - l2advertisements
+ sideEffects: None
diff --git a/charts/metallb-0.13.7/values.yaml b/charts/metallb-0.13.7/values.yaml
new file mode 100644
index 0000000..2b990b7
--- /dev/null
+++ b/charts/metallb-0.13.7/values.yaml
@@ -0,0 +1,12 @@
+controller:
+ image:
+ repository: quay.io/metallb/controller
+ tag:
+ pullPolicy:
+ logLevel: info
+speaker:
+ image:
+ repository: quay.io/metallb/speaker
+ tag:
+ pullPolicy:
+ logLevel: info
diff --git a/charts/metallb-ipaddresspool/.helmignore b/charts/metallb-ipaddresspool/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/metallb-ipaddresspool/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/metallb-ipaddresspool/Chart.yaml b/charts/metallb-ipaddresspool/Chart.yaml
new file mode 100644
index 0000000..6c585c0
--- /dev/null
+++ b/charts/metallb-ipaddresspool/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: metallb-ipaddresspool
+description: A Helm chart to configure MetalLB IPAddressPool
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/metallb-ipaddresspool/templates/ip-address-pool.yaml b/charts/metallb-ipaddresspool/templates/ip-address-pool.yaml
new file mode 100644
index 0000000..218bb36
--- /dev/null
+++ b/charts/metallb-ipaddresspool/templates/ip-address-pool.yaml
@@ -0,0 +1,19 @@
+{{- $ns := default .Release.Namespace .Values.namespace }}
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: {{ .Values.name }}
+ namespace: {{ $ns }}
+spec:
+ autoAssign: {{ .Values.autoAssign }}
+ addresses:
+ - {{ .Values.from }}-{{ .Values.to }}
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: {{ .Values.name }}
+ namespace: {{ $ns }}
+spec:
+ ipAddressPools:
+ - {{ .Values.name }}
diff --git a/charts/metallb-ipaddresspool/values.yaml b/charts/metallb-ipaddresspool/values.yaml
new file mode 100644
index 0000000..107ff65
--- /dev/null
+++ b/charts/metallb-ipaddresspool/values.yaml
@@ -0,0 +1,5 @@
+name: foo
+autoAssign: false
+from: 10.1.0.10
+to: 10.1.0.255
+namespace: ""
diff --git a/charts/metallb/.helmignore b/charts/metallb/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/metallb/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/metallb/Chart.lock b/charts/metallb/Chart.lock
new file mode 100644
index 0000000..425c50f
--- /dev/null
+++ b/charts/metallb/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: crds
+ repository: ""
+ version: 0.13.12
+digest: sha256:bc3d2abdac552d6a886bd1d533eef9a432e5809a0dda4a85c7de4fdf2094cdb0
+generated: "2023-10-20T16:56:55.333731157+02:00"
diff --git a/charts/metallb/Chart.yaml b/charts/metallb/Chart.yaml
new file mode 100644
index 0000000..6e964d7
--- /dev/null
+++ b/charts/metallb/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v2
+appVersion: v0.13.12
+dependencies:
+- condition: crds.enabled
+ name: crds
+ repository: ""
+ version: 0.13.12
+description: A network load-balancer implementation for Kubernetes using standard
+ routing protocols
+home: https://metallb.universe.tf
+icon: https://metallb.universe.tf/images/logo/metallb-white.png
+kubeVersion: '>= 1.19.0-0'
+name: metallb
+sources:
+- https://github.com/metallb/metallb
+type: application
+version: 0.13.12
diff --git a/charts/metallb/README.md b/charts/metallb/README.md
new file mode 100644
index 0000000..11bbe7d
--- /dev/null
+++ b/charts/metallb/README.md
@@ -0,0 +1,158 @@
+# metallb
+
+  
+
+A network load-balancer implementation for Kubernetes using standard routing protocols
+
+**Homepage:** <https://metallb.universe.tf>
+
+## Source Code
+
+* <https://github.com/metallb/metallb>
+
+## Requirements
+
+Kubernetes: `>= 1.19.0-0`
+
+| Repository | Name | Version |
+|------------|------|---------|
+| | crds | 0.13.12 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| controller.affinity | object | `{}` | |
+| controller.enabled | bool | `true` | |
+| controller.image.pullPolicy | string | `nil` | |
+| controller.image.repository | string | `"quay.io/metallb/controller"` | |
+| controller.image.tag | string | `nil` | |
+| controller.labels | object | `{}` | |
+| controller.livenessProbe.enabled | bool | `true` | |
+| controller.livenessProbe.failureThreshold | int | `3` | |
+| controller.livenessProbe.initialDelaySeconds | int | `10` | |
+| controller.livenessProbe.periodSeconds | int | `10` | |
+| controller.livenessProbe.successThreshold | int | `1` | |
+| controller.livenessProbe.timeoutSeconds | int | `1` | |
+| controller.logLevel | string | `"info"` | Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` |
+| controller.nodeSelector | object | `{}` | |
+| controller.podAnnotations | object | `{}` | |
+| controller.priorityClassName | string | `""` | |
+| controller.readinessProbe.enabled | bool | `true` | |
+| controller.readinessProbe.failureThreshold | int | `3` | |
+| controller.readinessProbe.initialDelaySeconds | int | `10` | |
+| controller.readinessProbe.periodSeconds | int | `10` | |
+| controller.readinessProbe.successThreshold | int | `1` | |
+| controller.readinessProbe.timeoutSeconds | int | `1` | |
+| controller.resources | object | `{}` | |
+| controller.runtimeClassName | string | `""` | |
+| controller.securityContext.fsGroup | int | `65534` | |
+| controller.securityContext.runAsNonRoot | bool | `true` | |
+| controller.securityContext.runAsUser | int | `65534` | |
+| controller.serviceAccount.annotations | object | `{}` | |
+| controller.serviceAccount.create | bool | `true` | |
+| controller.serviceAccount.name | string | `""` | |
+| controller.strategy.type | string | `"RollingUpdate"` | |
+| controller.tolerations | list | `[]` | |
+| crds.enabled | bool | `true` | |
+| crds.validationFailurePolicy | string | `"Fail"` | |
+| fullnameOverride | string | `""` | |
+| imagePullSecrets | list | `[]` | |
+| loadBalancerClass | string | `""` | |
+| nameOverride | string | `""` | |
+| prometheus.controllerMetricsTLSSecret | string | `""` | |
+| prometheus.metricsPort | int | `7472` | |
+| prometheus.namespace | string | `""` | |
+| prometheus.podMonitor.additionalLabels | object | `{}` | |
+| prometheus.podMonitor.annotations | object | `{}` | |
+| prometheus.podMonitor.enabled | bool | `false` | |
+| prometheus.podMonitor.interval | string | `nil` | |
+| prometheus.podMonitor.jobLabel | string | `"app.kubernetes.io/name"` | |
+| prometheus.podMonitor.metricRelabelings | list | `[]` | |
+| prometheus.podMonitor.relabelings | list | `[]` | |
+| prometheus.prometheusRule.additionalLabels | object | `{}` | |
+| prometheus.prometheusRule.addressPoolExhausted.enabled | bool | `true` | |
+| prometheus.prometheusRule.addressPoolExhausted.labels.severity | string | `"alert"` | |
+| prometheus.prometheusRule.addressPoolUsage.enabled | bool | `true` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[0].labels.severity | string | `"warning"` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[0].percent | int | `75` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[1].labels.severity | string | `"warning"` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[1].percent | int | `85` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[2].labels.severity | string | `"alert"` | |
+| prometheus.prometheusRule.addressPoolUsage.thresholds[2].percent | int | `95` | |
+| prometheus.prometheusRule.annotations | object | `{}` | |
+| prometheus.prometheusRule.bgpSessionDown.enabled | bool | `true` | |
+| prometheus.prometheusRule.bgpSessionDown.labels.severity | string | `"alert"` | |
+| prometheus.prometheusRule.configNotLoaded.enabled | bool | `true` | |
+| prometheus.prometheusRule.configNotLoaded.labels.severity | string | `"warning"` | |
+| prometheus.prometheusRule.enabled | bool | `false` | |
+| prometheus.prometheusRule.extraAlerts | list | `[]` | |
+| prometheus.prometheusRule.staleConfig.enabled | bool | `true` | |
+| prometheus.prometheusRule.staleConfig.labels.severity | string | `"warning"` | |
+| prometheus.rbacPrometheus | bool | `true` | |
+| prometheus.rbacProxy.pullPolicy | string | `nil` | |
+| prometheus.rbacProxy.repository | string | `"gcr.io/kubebuilder/kube-rbac-proxy"` | |
+| prometheus.rbacProxy.tag | string | `"v0.12.0"` | |
+| prometheus.scrapeAnnotations | bool | `false` | |
+| prometheus.serviceAccount | string | `""` | |
+| prometheus.serviceMonitor.controller.additionalLabels | object | `{}` | |
+| prometheus.serviceMonitor.controller.annotations | object | `{}` | |
+| prometheus.serviceMonitor.controller.tlsConfig.insecureSkipVerify | bool | `true` | |
+| prometheus.serviceMonitor.enabled | bool | `false` | |
+| prometheus.serviceMonitor.interval | string | `nil` | |
+| prometheus.serviceMonitor.jobLabel | string | `"app.kubernetes.io/name"` | |
+| prometheus.serviceMonitor.metricRelabelings | list | `[]` | |
+| prometheus.serviceMonitor.relabelings | list | `[]` | |
+| prometheus.serviceMonitor.speaker.additionalLabels | object | `{}` | |
+| prometheus.serviceMonitor.speaker.annotations | object | `{}` | |
+| prometheus.serviceMonitor.speaker.tlsConfig.insecureSkipVerify | bool | `true` | |
+| prometheus.speakerMetricsTLSSecret | string | `""` | |
+| rbac.create | bool | `true` | |
+| speaker.affinity | object | `{}` | |
+| speaker.enabled | bool | `true` | |
+| speaker.excludeInterfaces.enabled | bool | `true` | |
+| speaker.frr.enabled | bool | `true` | |
+| speaker.frr.image.pullPolicy | string | `nil` | |
+| speaker.frr.image.repository | string | `"quay.io/frrouting/frr"` | |
+| speaker.frr.image.tag | string | `"8.5.2"` | |
+| speaker.frr.metricsPort | int | `7473` | |
+| speaker.frr.resources | object | `{}` | |
+| speaker.frrMetrics.resources | object | `{}` | |
+| speaker.image.pullPolicy | string | `nil` | |
+| speaker.image.repository | string | `"quay.io/metallb/speaker"` | |
+| speaker.image.tag | string | `nil` | |
+| speaker.labels | object | `{}` | |
+| speaker.livenessProbe.enabled | bool | `true` | |
+| speaker.livenessProbe.failureThreshold | int | `3` | |
+| speaker.livenessProbe.initialDelaySeconds | int | `10` | |
+| speaker.livenessProbe.periodSeconds | int | `10` | |
+| speaker.livenessProbe.successThreshold | int | `1` | |
+| speaker.livenessProbe.timeoutSeconds | int | `1` | |
+| speaker.logLevel | string | `"info"` | Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` |
+| speaker.memberlist.enabled | bool | `true` | |
+| speaker.memberlist.mlBindPort | int | `7946` | |
+| speaker.memberlist.mlSecretKeyPath | string | `"/etc/ml_secret_key"` | |
+| speaker.nodeSelector | object | `{}` | |
+| speaker.podAnnotations | object | `{}` | |
+| speaker.priorityClassName | string | `""` | |
+| speaker.readinessProbe.enabled | bool | `true` | |
+| speaker.readinessProbe.failureThreshold | int | `3` | |
+| speaker.readinessProbe.initialDelaySeconds | int | `10` | |
+| speaker.readinessProbe.periodSeconds | int | `10` | |
+| speaker.readinessProbe.successThreshold | int | `1` | |
+| speaker.readinessProbe.timeoutSeconds | int | `1` | |
+| speaker.reloader.resources | object | `{}` | |
+| speaker.resources | object | `{}` | |
+| speaker.runtimeClassName | string | `""` | |
+| speaker.serviceAccount.annotations | object | `{}` | |
+| speaker.serviceAccount.create | bool | `true` | |
+| speaker.serviceAccount.name | string | `""` | |
+| speaker.startupProbe.enabled | bool | `true` | |
+| speaker.startupProbe.failureThreshold | int | `30` | |
+| speaker.startupProbe.periodSeconds | int | `5` | |
+| speaker.tolerateMaster | bool | `true` | |
+| speaker.tolerations | list | `[]` | |
+| speaker.updateStrategy.type | string | `"RollingUpdate"` | |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0)
diff --git a/charts/metallb/charts/crds/.helmignore b/charts/metallb/charts/crds/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/metallb/charts/crds/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/metallb/charts/crds/Chart.yaml b/charts/metallb/charts/crds/Chart.yaml
new file mode 100644
index 0000000..255ac2b
--- /dev/null
+++ b/charts/metallb/charts/crds/Chart.yaml
@@ -0,0 +1,10 @@
+apiVersion: v2
+appVersion: v0.13.12
+description: MetalLB CRDs
+home: https://metallb.universe.tf
+icon: https://metallb.universe.tf/images/logo/metallb-white.png
+name: crds
+sources:
+- https://github.com/metallb/metallb
+type: application
+version: 0.13.12
diff --git a/charts/metallb/charts/crds/README.md b/charts/metallb/charts/crds/README.md
new file mode 100644
index 0000000..15bf8a7
--- /dev/null
+++ b/charts/metallb/charts/crds/README.md
@@ -0,0 +1,14 @@
+# crds
+
+  
+
+MetalLB CRDs
+
+**Homepage:** <https://metallb.universe.tf>
+
+## Source Code
+
+* <https://github.com/metallb/metallb>
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0)
diff --git a/charts/metallb/charts/crds/templates/crds.yaml b/charts/metallb/charts/crds/templates/crds.yaml
new file mode 100644
index 0000000..9b415ac
--- /dev/null
+++ b/charts/metallb/charts/crds/templates/crds.yaml
@@ -0,0 +1,1233 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ name: addresspools.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: AddressPool
+ listKind: AddressPoolList
+ plural: addresspools
+ singular: addresspool
+ scope: Namespaced
+ conversion:
+ strategy: Webhook
+ webhook:
+ conversionReviewVersions: ["v1alpha1", "v1beta1"]
+ clientConfig:
+ # this is a valid pem format, otherwise the apiserver will reject the deletion of the crds
+ # with "unable to parse bytes as PEM block", The controller will patch it with the right content after it starts
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRt
b1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ namespace: {{ .Release.Namespace }}
+ name: metallb-webhook-service
+ path: /convert
+ versions:
+ - deprecated: true
+ deprecationWarning: metallb.io v1alpha1 AddressPool is deprecated
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: AddressPool is the Schema for the addresspools API.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AddressPoolSpec defines the desired state of AddressPool.
+ properties:
+ addresses:
+ description: A list of IP address ranges over which MetalLB has authority.
+ You can list multiple ranges in a single pool, they will all share
+ the same settings. Each range can be either a CIDR prefix, or an
+ explicit start-end range of IPs.
+ items:
+ type: string
+ type: array
+ autoAssign:
+ default: true
+ description: AutoAssign flag used to prevent MetalLB from automatic
+ allocation for a pool.
+ type: boolean
+ bgpAdvertisements:
+ description: When an IP is allocated from this pool, how should it
+ be translated into BGP announcements?
+ items:
+ properties:
+ aggregationLength:
+ default: 32
+ description: The aggregation-length advertisement option lets
+ you “roll up” the /32s into a larger prefix.
+ format: int32
+ minimum: 1
+ type: integer
+ aggregationLengthV6:
+ default: 128
+ description: Optional, defaults to 128 (i.e. no aggregation)
+ if not specified.
+ format: int32
+ type: integer
+ communities:
+ description: BGP communities
+ items:
+ type: string
+ type: array
+ localPref:
+ description: BGP LOCAL_PREF attribute which is used by BGP best
+ path algorithm, Path with higher localpref is preferred over
+ one with lower localpref.
+ format: int32
+ type: integer
+ type: object
+ type: array
+ protocol:
+ description: Protocol can be used to select how the announcement is
+ done.
+ enum:
+ - layer2
+ - bgp
+ type: string
+ required:
+ - addresses
+ - protocol
+ type: object
+ status:
+ description: AddressPoolStatus defines the observed state of AddressPool.
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - deprecated: true
+ deprecationWarning: metallb.io v1beta1 AddressPool is deprecated, consider using
+ IPAddressPool
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AddressPool represents a pool of IP addresses that can be allocated
+ to LoadBalancer services. AddressPool is deprecated and being replaced by
+ IPAddressPool.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AddressPoolSpec defines the desired state of AddressPool.
+ properties:
+ addresses:
+ description: A list of IP address ranges over which MetalLB has authority.
+ You can list multiple ranges in a single pool, they will all share
+ the same settings. Each range can be either a CIDR prefix, or an
+ explicit start-end range of IPs.
+ items:
+ type: string
+ type: array
+ autoAssign:
+ default: true
+ description: AutoAssign flag used to prevent MetalLB from automatic
+ allocation for a pool.
+ type: boolean
+ bgpAdvertisements:
+ description: Drives how an IP allocated from this pool should be translated
+ into BGP announcements.
+ items:
+ properties:
+ aggregationLength:
+ default: 32
+ description: The aggregation-length advertisement option lets
+ you “roll up” the /32s into a larger prefix.
+ format: int32
+ minimum: 1
+ type: integer
+ aggregationLengthV6:
+ default: 128
+ description: Optional, defaults to 128 (i.e. no aggregation)
+ if not specified.
+ format: int32
+ type: integer
+ communities:
+ description: BGP communities to be associated with the given
+ advertisement.
+ items:
+ type: string
+ type: array
+ localPref:
+ description: BGP LOCAL_PREF attribute which is used by BGP best
+ path algorithm, Path with higher localpref is preferred over
+ one with lower localpref.
+ format: int32
+ type: integer
+ type: object
+ type: array
+ protocol:
+ description: Protocol can be used to select how the announcement is
+ done.
+ enum:
+ - layer2
+ - bgp
+ type: string
+ required:
+ - addresses
+ - protocol
+ type: object
+ status:
+ description: AddressPoolStatus defines the observed state of AddressPool.
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ name: bfdprofiles.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: BFDProfile
+ listKind: BFDProfileList
+ plural: bfdprofiles
+ singular: bfdprofile
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BFDProfile represents the settings of the bfd session that can
+ be optionally associated with a BGP session.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BFDProfileSpec defines the desired state of BFDProfile.
+ properties:
+ detectMultiplier:
+ description: Configures the detection multiplier to determine packet
+ loss. The remote transmission interval will be multiplied by this
+ value to determine the connection loss detection timer.
+ format: int32
+ maximum: 255
+ minimum: 2
+ type: integer
+ echoInterval:
+ description: Configures the minimal echo receive transmission interval
+ that this system is capable of handling in milliseconds. Defaults
+ to 50ms
+ format: int32
+ maximum: 60000
+ minimum: 10
+ type: integer
+ echoMode:
+ description: Enables or disables the echo transmission mode. This
+ mode is disabled by default, and not supported on multi hops setups.
+ type: boolean
+ minimumTtl:
+ description: 'For multi hop sessions only: configure the minimum expected
+ TTL for an incoming BFD control packet.'
+ format: int32
+ maximum: 254
+ minimum: 1
+ type: integer
+ passiveMode:
+ description: 'Mark session as passive: a passive session will not
+ attempt to start the connection and will wait for control packets
+ from peer before it begins replying.'
+ type: boolean
+ receiveInterval:
+ description: The minimum interval that this system is capable of receiving
+ control packets in milliseconds. Defaults to 300ms.
+ format: int32
+ maximum: 60000
+ minimum: 10
+ type: integer
+ transmitInterval:
+ description: The minimum transmission interval (less jitter) that
+ this system wants to use to send BFD control packets in milliseconds.
+ Defaults to 300ms
+ format: int32
+ maximum: 60000
+ minimum: 10
+ type: integer
+ type: object
+ status:
+ description: BFDProfileStatus defines the observed state of BFDProfile.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ name: bgpadvertisements.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: BGPAdvertisement
+ listKind: BGPAdvertisementList
+ plural: bgpadvertisements
+ singular: bgpadvertisement
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BGPAdvertisement allows to advertise the IPs coming from the
+ selected IPAddressPools via BGP, setting the parameters of the BGP Advertisement.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPAdvertisementSpec defines the desired state of BGPAdvertisement.
+ properties:
+ aggregationLength:
+ default: 32
+ description: The aggregation-length advertisement option lets you
+ “roll up” the /32s into a larger prefix. Defaults to 32. Works for
+ IPv4 addresses.
+ format: int32
+ minimum: 1
+ type: integer
+ aggregationLengthV6:
+ default: 128
+ description: The aggregation-length advertisement option lets you
+ “roll up” the /128s into a larger prefix. Defaults to 128. Works
+ for IPv6 addresses.
+ format: int32
+ type: integer
+ communities:
+ description: The BGP communities to be associated with the announcement.
+ Each item can be a community of the form 1234:1234 or the name of
+ an alias defined in the Community CRD.
+ items:
+ type: string
+ type: array
+ ipAddressPoolSelectors:
+ description: A selector for the IPAddressPools which would get advertised
+ via this advertisement. If no IPAddressPool is selected by this
+ or by the list, the advertisement is applied to all the IPAddressPools.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ type: array
+ ipAddressPools:
+ description: The list of IPAddressPools to advertise via this advertisement,
+ selected by name.
+ items:
+ type: string
+ type: array
+ localPref:
+ description: The BGP LOCAL_PREF attribute which is used by BGP best
+ path algorithm, Path with higher localpref is preferred over one
+ with lower localpref.
+ format: int32
+ type: integer
+ nodeSelectors:
+ description: NodeSelectors allows to limit the nodes to announce as
+ next hops for the LoadBalancer IP. When empty, all the nodes are
+ announced as next hops.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ type: array
+ peers:
+ description: Peers limits the bgppeer to advertise the ips of the
+ selected pools to. When empty, the loadbalancer IP is announced
+ to all the BGPPeers configured.
+ items:
+ type: string
+ type: array
+ type: object
+ status:
+ description: BGPAdvertisementStatus defines the observed state of BGPAdvertisement.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ name: bgppeers.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: BGPPeer
+ listKind: BGPPeerList
+ plural: bgppeers
+ singular: bgppeer
+ scope: Namespaced
+ conversion:
+ strategy: Webhook
+ webhook:
+ conversionReviewVersions: ["v1beta1", "v1beta2"]
+ clientConfig:
+ # this is a valid pem format, otherwise the apiserver will reject the deletion of the crds
+ # with "unable to parse bytes as PEM block", The controller will patch it with the right content after it starts
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRt
b1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ namespace: {{ .Release.Namespace }}
+ name: metallb-webhook-service
+ path: /convert
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: BGPPeer is the Schema for the peers API.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPPeerSpec defines the desired state of Peer.
+ properties:
+ bfdProfile:
+ type: string
+ ebgpMultiHop:
+ description: EBGP peer is multi-hops away
+ type: boolean
+ holdTime:
+ description: Requested BGP hold time, per RFC4271.
+ type: string
+ keepaliveTime:
+ description: Requested BGP keepalive time, per RFC4271.
+ type: string
+ myASN:
+ description: AS number to use for the local end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ nodeSelectors:
+ description: Only connect to this peer on nodes that match one of
+ these selectors.
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ minItems: 1
+ type: array
+ required:
+ - key
+ - operator
+ - values
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: array
+ password:
+ description: Authentication password for routers enforcing TCP MD5
+ authenticated sessions
+ type: string
+ peerASN:
+ description: AS number to expect from the remote end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ peerAddress:
+ description: Address to dial when establishing the session.
+ type: string
+ peerPort:
+ description: Port to dial when establishing the session.
+ maximum: 16384
+ minimum: 0
+ type: integer
+ routerID:
+ description: BGP router ID to advertise to the peer
+ type: string
+ sourceAddress:
+ description: Source address to use when establishing the session.
+ type: string
+ required:
+ - myASN
+ - peerASN
+ - peerAddress
+ type: object
+ status:
+ description: BGPPeerStatus defines the observed state of Peer.
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: BGPPeer is the Schema for the peers API.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPPeerSpec defines the desired state of Peer.
+ properties:
+ bfdProfile:
+ description: The name of the BFD Profile to be used for the BFD session
+ associated to the BGP session. If not set, the BFD session won't
+ be set up.
+ type: string
+ ebgpMultiHop:
+ description: To set if the BGPPeer is multi-hops away. Needed for
+ FRR mode only.
+ type: boolean
+ holdTime:
+ description: Requested BGP hold time, per RFC4271.
+ type: string
+ keepaliveTime:
+ description: Requested BGP keepalive time, per RFC4271.
+ type: string
+ myASN:
+ description: AS number to use for the local end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ nodeSelectors:
+ description: Only connect to this peer on nodes that match one of
+ these selectors.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ type: array
+ password:
+ description: Authentication password for routers enforcing TCP MD5
+ authenticated sessions
+ type: string
+ passwordSecret:
+ description: passwordSecret is name of the authentication secret for
+ BGP Peer. the secret must be of type "kubernetes.io/basic-auth",
+ and created in the same namespace as the MetalLB deployment. The
+ password is stored in the secret as the key "password".
+ properties:
+ name:
+ description: Name is unique within a namespace to reference a
+ secret resource.
+ type: string
+ namespace:
+ description: Namespace defines the space within which the secret
+ name must be unique.
+ type: string
+ type: object
+ peerASN:
+ description: AS number to expect from the remote end of the session.
+ format: int32
+ maximum: 4294967295
+ minimum: 0
+ type: integer
+ peerAddress:
+ description: Address to dial when establishing the session.
+ type: string
+ peerPort:
+ default: 179
+ description: Port to dial when establishing the session.
+ maximum: 16384
+ minimum: 0
+ type: integer
+ routerID:
+ description: BGP router ID to advertise to the peer
+ type: string
+ sourceAddress:
+ description: Source address to use when establishing the session.
+ type: string
+ vrf:
+ description: To set if we want to peer with the BGPPeer using an interface
+ belonging to a host vrf
+ type: string
+ required:
+ - myASN
+ - peerASN
+ - peerAddress
+ type: object
+ status:
+ description: BGPPeerStatus defines the observed state of Peer.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ name: ipaddresspools.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: IPAddressPool
+ listKind: IPAddressPoolList
+ plural: ipaddresspools
+ singular: ipaddresspool
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: IPAddressPool represents a pool of IP addresses that can be allocated
+ to LoadBalancer services.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAddressPoolSpec defines the desired state of IPAddressPool.
+ properties:
+ addresses:
+ description: A list of IP address ranges over which MetalLB has authority.
+ You can list multiple ranges in a single pool, they will all share
+ the same settings. Each range can be either a CIDR prefix, or an
+ explicit start-end range of IPs.
+ items:
+ type: string
+ type: array
+ autoAssign:
+ default: true
+ description: AutoAssign flag used to prevent MetallB from automatic
+ allocation for a pool.
+ type: boolean
+ avoidBuggyIPs:
+ default: false
+ description: AvoidBuggyIPs prevents addresses ending with .0 and .255
+ to be used by a pool.
+ type: boolean
+ serviceAllocation:
+ description: AllocateTo makes ip pool allocation to specific namespace
+ and/or service. The controller will use the pool with lowest value
+ of priority in case of multiple matches. A pool with no priority
+ set will be used only if the pools with priority can't be used.
+ If multiple matching IPAddressPools are available it will check
+ for the availability of IPs sorting the matching IPAddressPools
+ by priority, starting from the highest to the lowest. If multiple
+ IPAddressPools have the same priority, choice will be random.
+ properties:
+ namespaceSelectors:
+ description: NamespaceSelectors list of label selectors to select
+ namespace(s) for ip pool, an alternative to using namespace
+ list.
+ items:
+ description: A label selector is a label query over a set of
+ resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects. A
+ null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ type: array
+ namespaces:
+ description: Namespaces list of namespace(s) on which ip pool
+ can be attached.
+ items:
+ type: string
+ type: array
+ priority:
+ description: Priority priority given for ip pool while ip allocation
+ on a service.
+ type: integer
+ serviceSelectors:
+ description: ServiceSelectors list of label selector to select
+ service(s) for which ip pool can be used for ip allocation.
+ items:
+ description: A label selector is a label query over a set of
+ resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects. A
+ null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ type: array
+ type: object
+ required:
+ - addresses
+ type: object
+ status:
+ description: IPAddressPoolStatus defines the observed state of IPAddressPool.
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ name: l2advertisements.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: L2Advertisement
+ listKind: L2AdvertisementList
+ plural: l2advertisements
+ singular: l2advertisement
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: L2Advertisement allows to advertise the LoadBalancer IPs provided
+ by the selected pools via L2.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: L2AdvertisementSpec defines the desired state of L2Advertisement.
+ properties:
+ interfaces:
+ description: A list of interfaces to announce from. The LB IP will
+ be announced only from these interfaces. If the field is not set,
+ we advertise from all the interfaces on the host.
+ items:
+ type: string
+ type: array
+ ipAddressPoolSelectors:
+ description: A selector for the IPAddressPools which would get advertised
+ via this advertisement. If no IPAddressPool is selected by this
+ or by the list, the advertisement is applied to all the IPAddressPools.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ type: array
+ ipAddressPools:
+ description: The list of IPAddressPools to advertise via this advertisement,
+ selected by name.
+ items:
+ type: string
+ type: array
+ nodeSelectors:
+ description: NodeSelectors allows to limit the nodes to announce as
+                  next hops for the LoadBalancer IP. When empty, all the nodes are
+                  announced as next hops.
+ items:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ type: array
+ type: object
+ status:
+ description: L2AdvertisementStatus defines the observed state of L2Advertisement.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
+ name: communities.metallb.io
+spec:
+ group: metallb.io
+ names:
+ kind: Community
+ listKind: CommunityList
+ plural: communities
+ singular: community
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Community is a collection of aliases for communities. Users can
+ define named aliases to be used in the BGPPeer CRD.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CommunitySpec defines the desired state of Community.
+ properties:
+ communities:
+ items:
+ properties:
+ name:
+ description: The name of the alias for the community.
+ type: string
+ value:
+ description: The BGP community value corresponding to the given
+ name.
+ type: string
+ type: object
+ type: array
+ type: object
+ status:
+ description: CommunityStatus defines the observed state of Community.
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/metallb/policy/controller.rego b/charts/metallb/policy/controller.rego
new file mode 100644
index 0000000..716eeb7
--- /dev/null
+++ b/charts/metallb/policy/controller.rego
@@ -0,0 +1,16 @@
+package main
+
+# Deny when the controller Deployment does not run under the expected service account.
+deny[msg] {
+  input.kind == "Deployment"
+  sa := input.spec.template.spec.serviceAccountName
+  sa != "RELEASE-NAME-metallb-controller"
+  msg = sprintf("controller serviceAccountName '%s' does not match expected value", [sa])
+}
+
+# Deny when the built-in linux node selector is missing (also fires when nodeSelector is absent).
+deny[msg] {
+  input.kind == "Deployment"
+  not input.spec.template.spec.nodeSelector["kubernetes.io/os"] == "linux"
+  msg = "controller nodeSelector does not include '\"kubernetes.io/os\": linux'"
+}
diff --git a/charts/metallb/policy/rbac.rego b/charts/metallb/policy/rbac.rego
new file mode 100644
index 0000000..047345e
--- /dev/null
+++ b/charts/metallb/policy/rbac.rego
@@ -0,0 +1,27 @@
+package main
+
+# Deny when ClusterRole metallb:controller is missing the expected PSP "use" rule at rules[3].
+# `not x == y` (rather than `x != y`) also fires when rules[3] is undefined.
+deny[msg] {
+  input.kind == "ClusterRole"
+  input.metadata.name == "metallb:controller"
+  not input.rules[3] == {
+    "apiGroups": ["policy"],
+    "resources": ["podsecuritypolicies"],
+    "resourceNames": ["metallb-controller"],
+    "verbs": ["use"]
+  }
+  msg = "ClusterRole metallb:controller does not include PSP rule"
+}
+
+# Deny when ClusterRole metallb:speaker is missing the expected PSP "use" rule at rules[3].
+deny[msg] {
+  input.kind == "ClusterRole"
+  input.metadata.name == "metallb:speaker"
+  not input.rules[3] == {
+    "apiGroups": ["policy"],
+    "resources": ["podsecuritypolicies"],
+    "resourceNames": ["metallb-speaker"],
+    "verbs": ["use"]
+  }
+  msg = "ClusterRole metallb:speaker does not include PSP rule"
+}
diff --git a/charts/metallb/policy/speaker.rego b/charts/metallb/policy/speaker.rego
new file mode 100644
index 0000000..d4d8137
--- /dev/null
+++ b/charts/metallb/policy/speaker.rego
@@ -0,0 +1,30 @@
+package main
+
+# Deny when the speaker DaemonSet does not run under the expected service account.
+deny[msg] {
+  input.kind == "DaemonSet"
+  serviceAccountName := input.spec.template.spec.serviceAccountName
+  not serviceAccountName == "RELEASE-NAME-metallb-speaker"
+  msg = sprintf("speaker serviceAccountName '%s' does not match expected value", [serviceAccountName])
+}
+
+# Deny when METALLB_ML_SECRET_KEY_PATH (memberlist) is not the sixth env var of the speaker container.
+deny[msg] {
+  input.kind == "DaemonSet"
+  not input.spec.template.spec.containers[0].env[5].name == "METALLB_ML_SECRET_KEY_PATH"
+  msg = "speaker env does not contain METALLB_ML_SECRET_KEY_PATH at env[5]"
+}
+
+# Deny when the built-in linux node selector is missing (also fires when nodeSelector is absent).
+deny[msg] {
+  input.kind == "DaemonSet"
+  not input.spec.template.spec.nodeSelector["kubernetes.io/os"] == "linux"
+  msg = "speaker nodeSelector does not include '\"kubernetes.io/os\": linux'"
+}
+
+# Deny when the first toleration is not the built-in master NoSchedule toleration.
+deny[msg] {
+  input.kind == "DaemonSet"
+  not input.spec.template.spec.tolerations[0] == { "key": "node-role.kubernetes.io/master", "effect": "NoSchedule", "operator": "Exists" }
+  msg = "speaker tolerations does not include node-role.kubernetes.io/master:NoSchedule"
+}
diff --git a/charts/metallb/templates/NOTES.txt b/charts/metallb/templates/NOTES.txt
new file mode 100644
index 0000000..23d1d5b
--- /dev/null
+++ b/charts/metallb/templates/NOTES.txt
@@ -0,0 +1,4 @@
+MetalLB is now running in the cluster.
+
+Now you can configure it via its CRs. Please refer to the metallb official docs
+on how to use the CRs.
diff --git a/charts/metallb/templates/_helpers.tpl b/charts/metallb/templates/_helpers.tpl
new file mode 100644
index 0000000..53d9528
--- /dev/null
+++ b/charts/metallb/templates/_helpers.tpl
@@ -0,0 +1,113 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "metallb.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "metallb.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "metallb.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "metallb.labels" -}}
+helm.sh/chart: {{ include "metallb.chart" . }}
+{{ include "metallb.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "metallb.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "metallb.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the controller service account to use
+*/}}
+{{- define "metallb.controller.serviceAccountName" -}}
+{{- if .Values.controller.serviceAccount.create }}
+{{- default (printf "%s-controller" (include "metallb.fullname" .)) .Values.controller.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.controller.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the name of the speaker service account to use
+*/}}
+{{- define "metallb.speaker.serviceAccountName" -}}
+{{- if .Values.speaker.serviceAccount.create }}
+{{- default (printf "%s-speaker" (include "metallb.fullname" .)) .Values.speaker.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.speaker.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the name of the settings Secret to use.
+*/}}
+{{- define "metallb.secretName" -}}
+ {{ default ( printf "%s-memberlist" (include "metallb.fullname" .)) .Values.speaker.secretName | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{- define "metrics.exposedportname" -}}
+{{- if .Values.prometheus.secureMetricsPort -}}
+"metricshttps"
+{{- else -}}
+"metrics"
+{{- end -}}
+{{- end -}}
+
+{{- define "metrics.exposedfrrportname" -}}
+{{- if .Values.speaker.frr.secureMetricsPort -}}
+"frrmetricshttps"
+{{- else -}}
+"frrmetrics"
+{{- end }}
+{{- end }}
+
+{{- define "metrics.exposedport" -}}
+{{- if .Values.prometheus.secureMetricsPort -}}
+{{ .Values.prometheus.secureMetricsPort }}
+{{- else -}}
+{{ .Values.prometheus.metricsPort }}
+{{- end -}}
+{{- end }}
+
+{{- define "metrics.exposedfrrport" -}}
+{{- if .Values.speaker.frr.secureMetricsPort -}}
+{{ .Values.speaker.frr.secureMetricsPort }}
+{{- else -}}
+{{ .Values.speaker.frr.metricsPort }}
+{{- end }}
+{{- end }}
diff --git a/charts/metallb/templates/controller.yaml b/charts/metallb/templates/controller.yaml
new file mode 100644
index 0000000..2b522d1
--- /dev/null
+++ b/charts/metallb/templates/controller.yaml
@@ -0,0 +1,182 @@
+{{- if .Values.controller.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "metallb.fullname" . }}-controller
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- range $key, $value := .Values.controller.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ {{- if .Values.controller.strategy }}
+ strategy: {{- toYaml .Values.controller.strategy | nindent 4 }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "metallb.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+ template:
+ metadata:
+ {{- if or .Values.prometheus.scrapeAnnotations .Values.controller.podAnnotations }}
+ annotations:
+ {{- if .Values.prometheus.scrapeAnnotations }}
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.prometheus.metricsPort }}"
+ {{- end }}
+ {{- with .Values.controller.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ labels:
+ {{- include "metallb.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: controller
+ {{- range $key, $value := .Values.controller.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ spec:
+ {{- with .Values.controller.runtimeClassName }}
+ runtimeClassName: {{ . | quote }}
+ {{- end }}
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "metallb.controller.serviceAccountName" . }}
+ terminationGracePeriodSeconds: 0
+{{- if .Values.controller.securityContext }}
+ securityContext:
+{{ toYaml .Values.controller.securityContext | indent 8 }}
+{{- end }}
+ containers:
+ - name: controller
+ image: {{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}
+ {{- if .Values.controller.image.pullPolicy }}
+ imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
+ {{- end }}
+ {{- if .Values.controller.command }}
+ command:
+ - {{ .Values.controller.command }}
+ {{- end }}
+ args:
+ - --port={{ .Values.prometheus.metricsPort }}
+ {{- with .Values.controller.logLevel }}
+ - --log-level={{ . }}
+ {{- end }}
+ - --cert-service-name=metallb-webhook-service
+ {{- if .Values.loadBalancerClass }}
+ - --lb-class={{ .Values.loadBalancerClass }}
+ {{- end }}
+ {{- if .Values.controller.webhookMode }}
+ - --webhook-mode={{ .Values.controller.webhookMode }}
+ {{- end }}
+ env:
+ {{- if and .Values.speaker.enabled .Values.speaker.memberlist.enabled }}
+ - name: METALLB_ML_SECRET_NAME
+ value: {{ include "metallb.secretName" . }}
+ - name: METALLB_DEPLOYMENT
+ value: {{ template "metallb.fullname" . }}-controller
+ {{- end }}
+ {{- if .Values.speaker.frr.enabled }}
+ - name: METALLB_BGP_TYPE
+ value: frr
+ {{- end }}
+ ports:
+ - name: monitoring
+ containerPort: {{ .Values.prometheus.metricsPort }}
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ {{- if .Values.controller.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.controller.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.controller.readinessProbe.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.controller.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }}
+ {{- end }}
+ {{- with .Values.controller.resources }}
+ resources:
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ capabilities:
+ drop:
+ - ALL
+ {{- if .Values.prometheus.secureMetricsPort }}
+ - name: kube-rbac-proxy
+ image: {{ .Values.prometheus.rbacProxy.repository }}:{{ .Values.prometheus.rbacProxy.tag }}
+ imagePullPolicy: {{ .Values.prometheus.rbacProxy.pullPolicy }}
+ args:
+ - --logtostderr
+ - --secure-listen-address=:{{ .Values.prometheus.secureMetricsPort }}
+ - --upstream=http://127.0.0.1:{{ .Values.prometheus.metricsPort }}/
+ - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+ {{- if .Values.prometheus.controllerMetricsTLSSecret }}
+ - --tls-private-key-file=/etc/metrics/tls.key
+ - --tls-cert-file=/etc/metrics/tls.crt
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.prometheus.secureMetricsPort }}
+ name: metricshttps
+ resources:
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ terminationMessagePolicy: FallbackToLogsOnError
+ {{- if .Values.prometheus.controllerMetricsTLSSecret }}
+ volumeMounts:
+ - name: metrics-certs
+ mountPath: /etc/metrics
+ readOnly: true
+ {{- end }}
+ {{ end }}
+ nodeSelector:
+ "kubernetes.io/os": linux
+ {{- with .Values.controller.nodeSelector }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.controller.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.controller.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.controller.priorityClassName }}
+ priorityClassName: {{ . | quote }}
+ {{- end }}
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: webhook-server-cert
+ {{- if .Values.prometheus.controllerMetricsTLSSecret }}
+ - name: metrics-certs
+ secret:
+ secretName: {{ .Values.prometheus.controllerMetricsTLSSecret }}
+ {{- end }}
+{{- end }}
diff --git a/charts/metallb/templates/deprecated_configInline.yaml b/charts/metallb/templates/deprecated_configInline.yaml
new file mode 100644
index 0000000..8a1a551
--- /dev/null
+++ b/charts/metallb/templates/deprecated_configInline.yaml
@@ -0,0 +1,3 @@
+{{- if .Values.configInline }}
+{{- fail "Starting from v0.13.0 configInline is no longer supported. Please see https://metallb.universe.tf/#backward-compatibility" }}
+{{- end }}
diff --git a/charts/metallb/templates/exclude-l2-config.yaml b/charts/metallb/templates/exclude-l2-config.yaml
new file mode 100644
index 0000000..cacea8f
--- /dev/null
+++ b/charts/metallb/templates/exclude-l2-config.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.speaker.excludeInterfaces.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: metallb-excludel2
+ namespace: {{ .Release.Namespace | quote }}
+data:
+ excludel2.yaml: |
+ announcedInterfacesToExclude:
+ - ^docker.*
+ - ^cbr.*
+ - ^dummy.*
+ - ^virbr.*
+ - ^lxcbr.*
+ - ^veth.*
+ - ^lo$
+ - ^cali.*
+ - ^tunl.*
+ - ^flannel.*
+ - ^kube-ipvs.*
+ - ^cni.*
+ - ^nodelocaldns.*
+{{- end }}
\ No newline at end of file
diff --git a/charts/metallb/templates/podmonitor.yaml b/charts/metallb/templates/podmonitor.yaml
new file mode 100644
index 0000000..93a7fd6
--- /dev/null
+++ b/charts/metallb/templates/podmonitor.yaml
@@ -0,0 +1,106 @@
+{{- if .Values.prometheus.podMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+ name: {{ template "metallb.fullname" . }}-controller
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.prometheus.podMonitor.additionalLabels }}
+{{ toYaml .Values.prometheus.podMonitor.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.prometheus.podMonitor.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.podMonitor.annotations | indent 4 }}
+ {{- end }}
+spec:
+ jobLabel: {{ .Values.prometheus.podMonitor.jobLabel | quote }}
+ selector:
+ matchLabels:
+ {{- include "metallb.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ podMetricsEndpoints:
+ - port: monitoring
+ path: /metrics
+ {{- if .Values.prometheus.podMonitor.interval }}
+ interval: {{ .Values.prometheus.podMonitor.interval }}
+ {{- end }}
+{{- if .Values.prometheus.podMonitor.metricRelabelings }}
+ metricRelabelings:
+{{- toYaml .Values.prometheus.podMonitor.metricRelabelings | nindent 4 }}
+{{- end }}
+{{- if .Values.prometheus.podMonitor.relabelings }}
+ relabelings:
+{{- toYaml .Values.prometheus.podMonitor.relabelings | nindent 4 }}
+{{- end }}
+---
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+ name: {{ template "metallb.fullname" . }}-speaker
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: speaker
+ {{- if .Values.prometheus.podMonitor.additionalLabels }}
+{{ toYaml .Values.prometheus.podMonitor.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.prometheus.podMonitor.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.podMonitor.annotations | indent 4 }}
+ {{- end }}
+spec:
+ jobLabel: {{ .Values.prometheus.podMonitor.jobLabel | quote }}
+ selector:
+ matchLabels:
+ {{- include "metallb.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: speaker
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ podMetricsEndpoints:
+ - port: monitoring
+ path: /metrics
+ {{- if .Values.prometheus.podMonitor.interval }}
+ interval: {{ .Values.prometheus.podMonitor.interval }}
+ {{- end }}
+{{- if .Values.prometheus.podMonitor.metricRelabelings }}
+ metricRelabelings:
+{{- toYaml .Values.prometheus.podMonitor.metricRelabelings | nindent 4 }}
+{{- end }}
+{{- if .Values.prometheus.podMonitor.relabelings }}
+ relabelings:
+{{- toYaml .Values.prometheus.podMonitor.relabelings | nindent 4 }}
+{{- end }}
+---
+{{- if .Values.prometheus.rbacPrometheus }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "metallb.fullname" . }}-prometheus
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ template "metallb.fullname" . }}-prometheus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "metallb.fullname" . }}-prometheus
+subjects:
+ - kind: ServiceAccount
+ name: {{ required ".Values.prometheus.serviceAccount must be defined when .Values.prometheus.podMonitor.enabled == true" .Values.prometheus.serviceAccount }}
+ namespace: {{ required ".Values.prometheus.namespace must be defined when .Values.prometheus.podMonitor.enabled == true" .Values.prometheus.namespace }}
+{{- end }}
+{{- end }}
diff --git a/charts/metallb/templates/prometheusrules.yaml b/charts/metallb/templates/prometheusrules.yaml
new file mode 100644
index 0000000..463aaca
--- /dev/null
+++ b/charts/metallb/templates/prometheusrules.yaml
@@ -0,0 +1,84 @@
+{{- if .Values.prometheus.prometheusRule.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: {{ template "metallb.fullname" . }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ {{- if .Values.prometheus.prometheusRule.additionalLabels }}
+{{ toYaml .Values.prometheus.prometheusRule.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.prometheus.prometheusRule.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.prometheusRule.annotations | indent 4 }}
+ {{- end }}
+spec:
+ groups:
+ - name: {{ template "metallb.fullname" . }}.rules
+ rules:
+ {{- if .Values.prometheus.prometheusRule.staleConfig.enabled }}
+ - alert: MetalLBStaleConfig
+ annotations:
+ message: {{`'{{ $labels.job }} - MetalLB {{ $labels.container }} on {{ $labels.pod
+ }} has a stale config for > 1 minute'`}}
+ expr: metallb_k8s_client_config_stale_bool{job="{{ include "metallb.name" . }}"} == 1
+ for: 1m
+ {{- with .Values.prometheus.prometheusRule.staleConfig.labels }}
+ labels:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.prometheus.prometheusRule.configNotLoaded.enabled }}
+ - alert: MetalLBConfigNotLoaded
+ annotations:
+ message: {{`'{{ $labels.job }} - MetalLB {{ $labels.container }} on {{ $labels.pod
+ }} has not loaded for > 1 minute'`}}
+ expr: metallb_k8s_client_config_loaded_bool{job="{{ include "metallb.name" . }}"} == 0
+ for: 1m
+ {{- with .Values.prometheus.prometheusRule.configNotLoaded.labels }}
+ labels:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.prometheus.prometheusRule.addressPoolExhausted.enabled }}
+ - alert: MetalLBAddressPoolExhausted
+ annotations:
+ message: {{`'{{ $labels.job }} - MetalLB {{ $labels.container }} on {{ $labels.pod
+ }} has exhausted address pool {{ $labels.pool }} for > 1 minute'`}}
+ expr: metallb_allocator_addresses_in_use_total >= on(pool) metallb_allocator_addresses_total
+ for: 1m
+ {{- with .Values.prometheus.prometheusRule.addressPoolExhausted.labels }}
+ labels:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.prometheus.prometheusRule.addressPoolUsage.enabled }}
+ {{- range .Values.prometheus.prometheusRule.addressPoolUsage.thresholds }}
+ - alert: MetalLBAddressPoolUsage{{ .percent }}Percent
+ annotations:
+ message: {{`'{{ $labels.job }} - MetalLB {{ $labels.container }} on {{ $labels.pod
+ }} has address pool {{ $labels.pool }} past `}}{{ .percent }}{{`% usage for > 1 minute'`}}
+ expr: ( metallb_allocator_addresses_in_use_total / on(pool) metallb_allocator_addresses_total ) * 100 > {{ .percent }}
+ {{- with .labels }}
+ labels:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.prometheus.prometheusRule.bgpSessionDown.enabled }}
+ - alert: MetalLBBGPSessionDown
+ annotations:
+ message: {{`'{{ $labels.job }} - MetalLB {{ $labels.container }} on {{ $labels.pod
+ }} has BGP session {{ $labels.peer }} down for > 1 minute'`}}
+ expr: metallb_bgp_session_up{job="{{ include "metallb.name" . }}"} == 0
+ for: 1m
+ {{- with .Values.prometheus.prometheusRule.bgpSessionDown.labels }}
+ labels:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.prometheus.prometheusRule.extraAlerts }}
+ {{- toYaml . | nindent 4 }}
+ {{- end}}
+{{- end }}
diff --git a/charts/metallb/templates/rbac.yaml b/charts/metallb/templates/rbac.yaml
new file mode 100644
index 0000000..ed6b826
--- /dev/null
+++ b/charts/metallb/templates/rbac.yaml
@@ -0,0 +1,210 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "metallb.fullname" . }}:controller
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["services", "namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["list"]
+- apiGroups: [""]
+ resources: ["services/status"]
+ verbs: ["update"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
+ resourceNames: ["metallb-webhook-configuration"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
+ verbs: ["list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ resourceNames: ["addresspools.metallb.io","bfdprofiles.metallb.io","bgpadvertisements.metallb.io",
+ "bgppeers.metallb.io","ipaddresspools.metallb.io","l2advertisements.metallb.io","communities.metallb.io"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["list", "watch"]
+{{- if .Values.prometheus.secureMetricsPort }}
+- apiGroups: ["authentication.k8s.io"]
+ resources: ["tokenreviews"]
+ verbs: ["create"]
+- apiGroups: ["authorization.k8s.io"]
+ resources: ["subjectaccessreviews"]
+ verbs: ["create"]
+{{- end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "metallb.fullname" . }}:speaker
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["services", "endpoints", "nodes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["discovery.k8s.io"]
+ resources: ["endpointslices"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+{{- if .Values.prometheus.secureMetricsPort }}
+- apiGroups: ["authentication.k8s.io"]
+ resources: ["tokenreviews"]
+ verbs: ["create"]
+- apiGroups: ["authorization.k8s.io"]
+ resources: ["subjectaccessreviews"]
+ verbs: ["create"]
+{{- end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "metallb.fullname" . }}-pod-lister
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "metallb.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["list"]
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["addresspools"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["bfdprofiles"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["bgppeers"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["l2advertisements"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["bgpadvertisements"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["ipaddresspools"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["communities"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "metallb.fullname" . }}-controller
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "metallb.labels" . | nindent 4 }}
+rules:
+{{- if .Values.speaker.memberlist.enabled }}
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create", "get", "list", "watch"]
+- apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: [{{ include "metallb.secretName" . | quote }}]
+ verbs: ["list"]
+- apiGroups: ["apps"]
+ resources: ["deployments"]
+ resourceNames: ["{{ template "metallb.fullname" . }}-controller"]
+ verbs: ["get"]
+{{- end }}
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["addresspools"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["ipaddresspools"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["bgppeers"]
+ verbs: ["get", "list"]
+- apiGroups: ["metallb.io"]
+ resources: ["bgpadvertisements"]
+ verbs: ["get", "list"]
+- apiGroups: ["metallb.io"]
+ resources: ["l2advertisements"]
+ verbs: ["get", "list"]
+- apiGroups: ["metallb.io"]
+ resources: ["communities"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["metallb.io"]
+ resources: ["bfdprofiles"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "metallb.fullname" . }}:controller
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+subjects:
+- kind: ServiceAccount
+ name: {{ template "metallb.controller.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "metallb.fullname" . }}:controller
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "metallb.fullname" . }}:speaker
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+subjects:
+- kind: ServiceAccount
+ name: {{ template "metallb.speaker.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "metallb.fullname" . }}:speaker
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "metallb.fullname" . }}-pod-lister
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "metallb.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "metallb.fullname" . }}-pod-lister
+subjects:
+- kind: ServiceAccount
+ name: {{ include "metallb.speaker.serviceAccountName" . }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "metallb.fullname" . }}-controller
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "metallb.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "metallb.fullname" . }}-controller
+subjects:
+- kind: ServiceAccount
+ name: {{ include "metallb.controller.serviceAccountName" . }}
+{{- end -}}
diff --git a/charts/metallb/templates/service-accounts.yaml b/charts/metallb/templates/service-accounts.yaml
new file mode 100644
index 0000000..9615acf
--- /dev/null
+++ b/charts/metallb/templates/service-accounts.yaml
@@ -0,0 +1,30 @@
+{{- if .Values.controller.serviceAccount.create }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "metallb.controller.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- if .Values.speaker.serviceAccount.create }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "metallb.speaker.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: speaker
+ {{- with .Values.speaker.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/metallb/templates/servicemonitor.yaml b/charts/metallb/templates/servicemonitor.yaml
new file mode 100644
index 0000000..1cfc0c4
--- /dev/null
+++ b/charts/metallb/templates/servicemonitor.yaml
@@ -0,0 +1,193 @@
+{{- if .Values.prometheus.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ template "metallb.fullname" . }}-speaker-monitor
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: speaker
+ {{- if .Values.prometheus.serviceMonitor.speaker.additionalLabels }}
+{{ toYaml .Values.prometheus.serviceMonitor.speaker.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.prometheus.serviceMonitor.speaker.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.serviceMonitor.speaker.annotations | indent 4 }}
+ {{- end }}
+spec:
+ endpoints:
+ - port: {{ template "metrics.exposedportname" . }}
+ honorLabels: true
+ {{- if .Values.prometheus.serviceMonitor.metricRelabelings }}
+ metricRelabelings:
+ {{- toYaml .Values.prometheus.serviceMonitor.metricRelabelings | nindent 8 }}
+ {{- end -}}
+ {{- if .Values.prometheus.serviceMonitor.relabelings }}
+ relabelings:
+ {{- toYaml .Values.prometheus.serviceMonitor.relabelings | nindent 8 }}
+ {{- end }}
+ {{- if .Values.prometheus.serviceMonitor.interval }}
+ interval: {{ .Values.prometheus.serviceMonitor.interval }}
+ {{- end -}}
+{{ if .Values.prometheus.secureMetricsPort }}
+ bearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ scheme: "https"
+{{- if .Values.prometheus.serviceMonitor.speaker.tlsConfig }}
+ tlsConfig:
+{{ toYaml .Values.prometheus.serviceMonitor.speaker.tlsConfig | indent 8 }}
+{{- end }}
+{{ end }}
+{{- if .Values.speaker.frr.enabled }}
+ - port: {{ template "metrics.exposedfrrportname" . }}
+ honorLabels: true
+{{ if .Values.speaker.frr.secureMetricsPort }}
+ {{- if .Values.prometheus.serviceMonitor.interval }}
+ interval: {{ .Values.prometheus.serviceMonitor.interval }}
+ {{- end }}
+ bearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ scheme: "https"
+{{- if .Values.prometheus.serviceMonitor.speaker.tlsConfig }}
+ tlsConfig:
+{{ toYaml .Values.prometheus.serviceMonitor.speaker.tlsConfig | indent 8 }}
+{{- end }}
+{{- end }}
+{{- end }}
+ jobLabel: {{ .Values.prometheus.serviceMonitor.jobLabel | quote }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ selector:
+ matchLabels:
+ name: {{ template "metallb.fullname" . }}-speaker-monitor-service
+---
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/scrape: "true"
+ {{- if .Values.prometheus.serviceMonitor.speaker.annotations }}
+{{ toYaml .Values.prometheus.serviceMonitor.speaker.annotations | indent 4 }}
+ {{- end }}
+ labels:
+ name: {{ template "metallb.fullname" . }}-speaker-monitor-service
+ name: {{ template "metallb.fullname" . }}-speaker-monitor-service
+ namespace: {{ .Release.Namespace | quote }}
+spec:
+ selector:
+ {{- include "metallb.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: speaker
+ clusterIP: None
+ ports:
+ - name: {{ template "metrics.exposedportname" . }}
+ port: {{ template "metrics.exposedport" . }}
+ targetPort: {{ template "metrics.exposedport" . }}
+{{- if .Values.speaker.frr.enabled }}
+ - name: {{ template "metrics.exposedfrrportname" . }}
+ port: {{ template "metrics.exposedfrrport" . }}
+ targetPort: {{ template "metrics.exposedfrrport" . }}
+{{- end }}
+ sessionAffinity: None
+ type: ClusterIP
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ template "metallb.fullname" . }}-controller-monitor
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.prometheus.serviceMonitor.controller.additionalLabels }}
+{{ toYaml .Values.prometheus.serviceMonitor.controller.additionalLabels | indent 4 }}
+ {{- end }}
+ {{- if .Values.prometheus.serviceMonitor.controller.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.serviceMonitor.controller.annotations | indent 4 }}
+ {{- end }}
+spec:
+ endpoints:
+ - port: {{ template "metrics.exposedportname" . }}
+ {{- if .Values.prometheus.serviceMonitor.metricRelabelings }}
+ metricRelabelings:
+ {{- toYaml .Values.prometheus.serviceMonitor.metricRelabelings | nindent 8 }}
+ {{- end -}}
+ {{- if .Values.prometheus.serviceMonitor.relabelings }}
+ relabelings:
+ {{- toYaml .Values.prometheus.serviceMonitor.relabelings | nindent 8 }}
+ {{- end }}
+ {{- if .Values.prometheus.serviceMonitor.interval }}
+ interval: {{ .Values.prometheus.serviceMonitor.interval }}
+ {{- end }}
+ honorLabels: true
+{{- if .Values.prometheus.secureMetricsPort }}
+ bearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ scheme: "https"
+{{- if .Values.prometheus.serviceMonitor.controller.tlsConfig }}
+ tlsConfig:
+{{ toYaml .Values.prometheus.serviceMonitor.controller.tlsConfig | indent 8 }}
+{{- end }}
+{{- end }}
+ jobLabel: {{ .Values.prometheus.serviceMonitor.jobLabel | quote }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ selector:
+ matchLabels:
+ name: {{ template "metallb.fullname" . }}-controller-monitor-service
+---
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/scrape: "true"
+ {{- if .Values.prometheus.serviceMonitor.controller.annotations }}
+{{ toYaml .Values.prometheus.serviceMonitor.controller.annotations | indent 4 }}
+ {{- end }}
+ labels:
+ name: {{ template "metallb.fullname" . }}-controller-monitor-service
+ name: {{ template "metallb.fullname" . }}-controller-monitor-service
+spec:
+ selector:
+ {{- include "metallb.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ clusterIP: None
+ ports:
+ - name: {{ template "metrics.exposedportname" . }}
+ port: {{ template "metrics.exposedport" . }}
+ targetPort: {{ template "metrics.exposedport" . }}
+ sessionAffinity: None
+ type: ClusterIP
+---
+{{- if .Values.prometheus.rbacPrometheus }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "metallb.fullname" . }}-prometheus
+ namespace: {{ .Release.Namespace | quote }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ template "metallb.fullname" . }}-prometheus
+ namespace: {{ .Release.Namespace | quote }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "metallb.fullname" . }}-prometheus
+subjects:
+ - kind: ServiceAccount
+ name: {{ required ".Values.prometheus.serviceAccount must be defined when .Values.prometheus.serviceMonitor.enabled == true" .Values.prometheus.serviceAccount }}
+ namespace: {{ required ".Values.prometheus.namespace must be defined when .Values.prometheus.serviceMonitor.enabled == true" .Values.prometheus.namespace }}
+{{- end }}
+{{- end }}
diff --git a/charts/metallb/templates/speaker.yaml b/charts/metallb/templates/speaker.yaml
new file mode 100644
index 0000000..1a4c7b2
--- /dev/null
+++ b/charts/metallb/templates/speaker.yaml
@@ -0,0 +1,510 @@
+{{- if .Values.speaker.frr.enabled }}
+# FRR expects to have these files owned by frr:frr on startup.
+# Having them in a ConfigMap allows us to modify behaviors: for example enabling more daemons on startup.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "metallb.fullname" . }}-frr-startup
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: speaker
+data:
+ daemons: |
+ # This file tells the frr package which daemons to start.
+ #
+ # Sample configurations for these daemons can be found in
+ # /usr/share/doc/frr/examples/.
+ #
+ # ATTENTION:
+ #
+ # When activating a daemon for the first time, a config file, even if it is
+ # empty, has to be present *and* be owned by the user and group "frr", else
+ # the daemon will not be started by /etc/init.d/frr. The permissions should
+ # be u=rw,g=r,o=.
+ # When using "vtysh" such a config file is also needed. It should be owned by
+ # group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too.
+ #
+ # The watchfrr and zebra daemons are always started.
+ #
+ bgpd=yes
+ ospfd=no
+ ospf6d=no
+ ripd=no
+ ripngd=no
+ isisd=no
+ pimd=no
+ ldpd=no
+ nhrpd=no
+ eigrpd=no
+ babeld=no
+ sharpd=no
+ pbrd=no
+ bfdd=yes
+ fabricd=no
+ vrrpd=no
+
+ #
+ # If this option is set the /etc/init.d/frr script automatically loads
+ # the config via "vtysh -b" when the servers are started.
+ # Check /etc/pam.d/frr if you intend to use "vtysh"!
+ #
+ vtysh_enable=yes
+ zebra_options=" -A 127.0.0.1 -s 90000000"
+ bgpd_options=" -A 127.0.0.1 -p 0"
+ ospfd_options=" -A 127.0.0.1"
+ ospf6d_options=" -A ::1"
+ ripd_options=" -A 127.0.0.1"
+ ripngd_options=" -A ::1"
+ isisd_options=" -A 127.0.0.1"
+ pimd_options=" -A 127.0.0.1"
+ ldpd_options=" -A 127.0.0.1"
+ nhrpd_options=" -A 127.0.0.1"
+ eigrpd_options=" -A 127.0.0.1"
+ babeld_options=" -A 127.0.0.1"
+ sharpd_options=" -A 127.0.0.1"
+ pbrd_options=" -A 127.0.0.1"
+ staticd_options="-A 127.0.0.1"
+ bfdd_options=" -A 127.0.0.1"
+ fabricd_options="-A 127.0.0.1"
+ vrrpd_options=" -A 127.0.0.1"
+
+ # configuration profile
+ #
+ #frr_profile="traditional"
+ #frr_profile="datacenter"
+
+ #
+ # This is the maximum number of FD's that will be available.
+ # Upon startup this is read by the control files and ulimit
+ # is called. Uncomment and use a reasonable value for your
+ # setup if you are expecting a large number of peers in
+ # say BGP.
+ #MAX_FDS=1024
+
+ # The list of daemons to watch is automatically generated by the init script.
+ #watchfrr_options=""
+
+ # for debugging purposes, you can specify a "wrap" command to start instead
+ # of starting the daemon directly, e.g. to use valgrind on ospfd:
+ # ospfd_wrap="/usr/bin/valgrind"
+ # or you can use "all_wrap" for all daemons, e.g. to use perf record:
+ # all_wrap="/usr/bin/perf record --call-graph -"
+ # the normal daemon command is added to this at the end.
+ vtysh.conf: |+
+ service integrated-vtysh-config
+ frr.conf: |+
+ ! This file gets overridden the first time the speaker renders a config.
+ ! So anything configured here is only temporary.
+ frr version 7.5.1
+ frr defaults traditional
+ hostname Router
+ line vty
+ log file /etc/frr/frr.log informational
+{{- end }}
+---
+{{- if .Values.speaker.enabled }}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ template "metallb.fullname" . }}-speaker
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+ app.kubernetes.io/component: speaker
+ {{- range $key, $value := .Values.speaker.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ {{- if .Values.speaker.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.speaker.updateStrategy | nindent 4 }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "metallb.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: speaker
+ template:
+ metadata:
+ {{- if or .Values.prometheus.scrapeAnnotations .Values.speaker.podAnnotations }}
+ annotations:
+ {{- if .Values.prometheus.scrapeAnnotations }}
+ prometheus.io/scrape: "true"
+ {{- if not .Values.speaker.frr.enabled }}
+ prometheus.io/port: "{{ .Values.prometheus.metricsPort }}"
+ {{- end }}
+ {{- end }}
+ {{- with .Values.speaker.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ labels:
+ {{- include "metallb.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: speaker
+ {{- range $key, $value := .Values.speaker.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ spec:
+ {{- if .Values.speaker.runtimeClassName }}
+ runtimeClassName: {{ .Values.speaker.runtimeClassName }}
+ {{- end }}
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "metallb.speaker.serviceAccountName" . }}
+ terminationGracePeriodSeconds: 0
+ hostNetwork: true
+ volumes:
+ {{- if .Values.speaker.memberlist.enabled }}
+ - name: memberlist
+ secret:
+ secretName: {{ include "metallb.secretName" . }}
+ defaultMode: 420
+ {{- end }}
+ {{- if .Values.speaker.excludeInterfaces.enabled }}
+ - name: metallb-excludel2
+ configMap:
+ defaultMode: 256
+ name: metallb-excludel2
+ {{- end }}
+ {{- if .Values.speaker.frr.enabled }}
+ - name: frr-sockets
+ emptyDir: {}
+ - name: frr-startup
+ configMap:
+ name: {{ template "metallb.fullname" . }}-frr-startup
+ - name: frr-conf
+ emptyDir: {}
+ - name: reloader
+ emptyDir: {}
+ - name: metrics
+ emptyDir: {}
+ {{- if .Values.prometheus.speakerMetricsTLSSecret }}
+ - name: metrics-certs
+ secret:
+ secretName: {{ .Values.prometheus.speakerMetricsTLSSecret }}
+ {{- end }}
+ initContainers:
+ # Copies the initial config files with the right permissions to the shared volume.
+ - name: cp-frr-files
+ image: {{ .Values.speaker.frr.image.repository }}:{{ .Values.speaker.frr.image.tag | default .Chart.AppVersion }}
+ securityContext:
+ runAsUser: 100
+ runAsGroup: 101
+ command: ["/bin/sh", "-c", "cp -rLf /tmp/frr/* /etc/frr/"]
+ volumeMounts:
+ - name: frr-startup
+ mountPath: /tmp/frr
+ - name: frr-conf
+ mountPath: /etc/frr
+ # Copies the reloader to the shared volume between the speaker and reloader.
+ - name: cp-reloader
+ image: {{ .Values.speaker.image.repository }}:{{ .Values.speaker.image.tag | default .Chart.AppVersion }}
+ command: ["/bin/sh", "-c", "cp -f /frr-reloader.sh /etc/frr_reloader/"]
+ volumeMounts:
+ - name: reloader
+ mountPath: /etc/frr_reloader
+ # Copies the metrics exporter
+ - name: cp-metrics
+ image: {{ .Values.speaker.image.repository }}:{{ .Values.speaker.image.tag | default .Chart.AppVersion }}
+ command: ["/bin/sh", "-c", "cp -f /frr-metrics /etc/frr_metrics/"]
+ volumeMounts:
+ - name: metrics
+ mountPath: /etc/frr_metrics
+ shareProcessNamespace: true
+ {{- end }}
+ containers:
+ - name: speaker
+ image: {{ .Values.speaker.image.repository }}:{{ .Values.speaker.image.tag | default .Chart.AppVersion }}
+ {{- if .Values.speaker.image.pullPolicy }}
+ imagePullPolicy: {{ .Values.speaker.image.pullPolicy }}
+ {{- end }}
+ {{- if .Values.speaker.command }}
+ command:
+ - {{ .Values.speaker.command }}
+ {{- end }}
+ args:
+ - --port={{ .Values.prometheus.metricsPort }}
+ {{- with .Values.speaker.logLevel }}
+ - --log-level={{ . }}
+ {{- end }}
+ {{- if .Values.loadBalancerClass }}
+ - --lb-class={{ .Values.loadBalancerClass }}
+ {{- end }}
+ env:
+ - name: METALLB_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: METALLB_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ {{- if .Values.speaker.memberlist.enabled }}
+ - name: METALLB_ML_BIND_ADDR
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: METALLB_ML_LABELS
+ value: "app.kubernetes.io/name={{ include "metallb.name" . }},app.kubernetes.io/component=speaker"
+ - name: METALLB_ML_BIND_PORT
+ value: "{{ .Values.speaker.memberlist.mlBindPort }}"
+ - name: METALLB_ML_SECRET_KEY_PATH
+ value: "{{ .Values.speaker.memberlist.mlSecretKeyPath }}"
+ {{- end }}
+ {{- if .Values.speaker.frr.enabled }}
+ - name: FRR_CONFIG_FILE
+ value: /etc/frr_reloader/frr.conf
+ - name: FRR_RELOADER_PID_FILE
+ value: /etc/frr_reloader/reloader.pid
+ - name: METALLB_BGP_TYPE
+ value: frr
+ {{- end }}
+ ports:
+ - name: monitoring
+ containerPort: {{ .Values.prometheus.metricsPort }}
+ {{- if .Values.speaker.memberlist.enabled }}
+ - name: memberlist-tcp
+ containerPort: {{ .Values.speaker.memberlist.mlBindPort }}
+ protocol: TCP
+ - name: memberlist-udp
+ containerPort: {{ .Values.speaker.memberlist.mlBindPort }}
+ protocol: UDP
+ {{- end }}
+ {{- if .Values.speaker.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: {{ .Values.speaker.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.speaker.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.speaker.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.speaker.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.speaker.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.speaker.readinessProbe.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: monitoring
+ initialDelaySeconds: {{ .Values.speaker.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.speaker.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.speaker.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.speaker.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.speaker.readinessProbe.failureThreshold }}
+ {{- end }}
+ {{- with .Values.speaker.resources }}
+ resources:
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_RAW
+ {{- if or .Values.speaker.frr.enabled .Values.speaker.memberlist.enabled .Values.speaker.excludeInterfaces.enabled }}
+ volumeMounts:
+ {{- if .Values.speaker.memberlist.enabled }}
+ - name: memberlist
+ mountPath: {{ .Values.speaker.memberlist.mlSecretKeyPath }}
+ {{- end }}
+ {{- if .Values.speaker.frr.enabled }}
+ - name: reloader
+ mountPath: /etc/frr_reloader
+ {{- end }}
+ {{- if .Values.speaker.excludeInterfaces.enabled }}
+ - name: metallb-excludel2
+ mountPath: /etc/metallb
+ {{- end }}
+ {{- end }}
+ {{- if .Values.speaker.frr.enabled }}
+ - name: frr
+ securityContext:
+ capabilities:
+ add:
+ - NET_ADMIN
+ - NET_RAW
+ - SYS_ADMIN
+ - NET_BIND_SERVICE
+ image: {{ .Values.speaker.frr.image.repository }}:{{ .Values.speaker.frr.image.tag | default .Chart.AppVersion }}
+ {{- if .Values.speaker.frr.image.pullPolicy }}
+ imagePullPolicy: {{ .Values.speaker.frr.image.pullPolicy }}
+ {{- end }}
+ env:
+ - name: TINI_SUBREAPER
+ value: "true"
+ volumeMounts:
+ - name: frr-sockets
+ mountPath: /var/run/frr
+ - name: frr-conf
+ mountPath: /etc/frr
+ # The command is FRR's default entrypoint & waiting for the log file to appear and tailing it.
+ # If the log file isn't created in 60 seconds the tail fails and the container is restarted.
+ # This workaround is needed to have the frr logs as part of kubectl logs -c frr < speaker_pod_name >.
+ command:
+ - /bin/sh
+ - -c
+ - |
+ /sbin/tini -- /usr/lib/frr/docker-start &
+ attempts=0
+ until [[ -f /etc/frr/frr.log || $attempts -eq 60 ]]; do
+ sleep 1
+ attempts=$(( $attempts + 1 ))
+ done
+ tail -f /etc/frr/frr.log
+ {{- with .Values.speaker.frr.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- if .Values.speaker.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /livez
+ port: {{ .Values.speaker.frr.metricsPort }}
+ initialDelaySeconds: {{ .Values.speaker.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.speaker.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.speaker.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.speaker.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.speaker.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.speaker.startupProbe.enabled }}
+ startupProbe:
+ httpGet:
+ path: /livez
+ port: {{ .Values.speaker.frr.metricsPort }}
+ failureThreshold: {{ .Values.speaker.startupProbe.failureThreshold }}
+ periodSeconds: {{ .Values.speaker.startupProbe.periodSeconds }}
+ {{- end }}
+ - name: reloader
+ image: {{ .Values.speaker.frr.image.repository }}:{{ .Values.speaker.frr.image.tag | default .Chart.AppVersion }}
+ {{- if .Values.speaker.frr.image.pullPolicy }}
+ imagePullPolicy: {{ .Values.speaker.frr.image.pullPolicy }}
+ {{- end }}
+ command: ["/etc/frr_reloader/frr-reloader.sh"]
+ volumeMounts:
+ - name: frr-sockets
+ mountPath: /var/run/frr
+ - name: frr-conf
+ mountPath: /etc/frr
+ - name: reloader
+ mountPath: /etc/frr_reloader
+ {{- with .Values.speaker.reloader.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ - name: frr-metrics
+ image: {{ .Values.speaker.frr.image.repository }}:{{ .Values.speaker.frr.image.tag | default .Chart.AppVersion }}
+ command: ["/etc/frr_metrics/frr-metrics"]
+ args:
+ - --metrics-port={{ .Values.speaker.frr.metricsPort }}
+ ports:
+ - containerPort: {{ .Values.speaker.frr.metricsPort }}
+ name: monitoring
+ volumeMounts:
+ - name: frr-sockets
+ mountPath: /var/run/frr
+ - name: frr-conf
+ mountPath: /etc/frr
+ - name: metrics
+ mountPath: /etc/frr_metrics
+ {{- with .Values.speaker.frrMetrics.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.prometheus.secureMetricsPort }}
+ - name: kube-rbac-proxy
+ image: {{ .Values.prometheus.rbacProxy.repository }}:{{ .Values.prometheus.rbacProxy.tag }}
+ imagePullPolicy: {{ .Values.prometheus.rbacProxy.pullPolicy }}
+ args:
+ - --logtostderr
+ - --secure-listen-address=:{{ .Values.prometheus.secureMetricsPort }}
+ - --upstream=http://$(METALLB_HOST):{{ .Values.prometheus.metricsPort }}/
+ - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+ {{- if .Values.prometheus.speakerMetricsTLSSecret }}
+ - --tls-private-key-file=/etc/metrics/tls.key
+ - --tls-cert-file=/etc/metrics/tls.crt
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.prometheus.secureMetricsPort }}
+ name: metricshttps
+ env:
+ - name: METALLB_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ resources:
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ terminationMessagePolicy: FallbackToLogsOnError
+ {{- if .Values.prometheus.speakerMetricsTLSSecret }}
+ volumeMounts:
+ - name: metrics-certs
+ mountPath: /etc/metrics
+ readOnly: true
+ {{- end }}
+ {{- end }}
+ {{- if .Values.speaker.frr.secureMetricsPort }}
+ - name: kube-rbac-proxy-frr
+ image: {{ .Values.prometheus.rbacProxy.repository }}:{{ .Values.prometheus.rbacProxy.tag | default .Chart.AppVersion }}
+ imagePullPolicy: {{ .Values.prometheus.rbacProxy.pullPolicy }}
+ args:
+ - --logtostderr
+ - --secure-listen-address=:{{ .Values.speaker.frr.secureMetricsPort }}
+ - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+ - --upstream=http://$(METALLB_HOST):{{ .Values.speaker.frr.metricsPort }}/
+ {{- if .Values.prometheus.speakerMetricsTLSSecret }}
+ - --tls-private-key-file=/etc/metrics/tls.key
+ - --tls-cert-file=/etc/metrics/tls.crt
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.speaker.frr.secureMetricsPort }}
+ name: metricshttps
+ env:
+ - name: METALLB_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ resources:
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ terminationMessagePolicy: FallbackToLogsOnError
+ {{- if .Values.prometheus.speakerMetricsTLSSecret }}
+ volumeMounts:
+ - name: metrics-certs
+ mountPath: /etc/metrics
+ readOnly: true
+ {{- end }}
+ {{- end }}
+ nodeSelector:
+ "kubernetes.io/os": linux
+ {{- with .Values.speaker.nodeSelector }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.speaker.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if or .Values.speaker.tolerateMaster .Values.speaker.tolerations }}
+ tolerations:
+ {{- if .Values.speaker.tolerateMaster }}
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ operator: Exists
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ operator: Exists
+ {{- end }}
+ {{- with .Values.speaker.tolerations }}
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.speaker.priorityClassName }}
+ priorityClassName: {{ . | quote }}
+ {{- end }}
+{{- end }}
diff --git a/charts/metallb/templates/webhooks.yaml b/charts/metallb/templates/webhooks.yaml
new file mode 100644
index 0000000..3b587a4
--- /dev/null
+++ b/charts/metallb/templates/webhooks.yaml
@@ -0,0 +1,170 @@
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: metallb-webhook-configuration
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace }}
+ path: /validate-metallb-io-v1beta1-addresspool
+ failurePolicy: {{ .Values.crds.validationFailurePolicy }}
+ name: addresspoolvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - addresspools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace }}
+ path: /validate-metallb-io-v1beta2-bgppeer
+ failurePolicy: {{ .Values.crds.validationFailurePolicy }}
+ name: bgppeervalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - bgppeers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace }}
+ path: /validate-metallb-io-v1beta1-ipaddresspool
+ failurePolicy: {{ .Values.crds.validationFailurePolicy }}
+ name: ipaddresspoolvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - ipaddresspools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace }}
+ path: /validate-metallb-io-v1beta1-bgpadvertisement
+ failurePolicy: {{ .Values.crds.validationFailurePolicy }}
+ name: bgpadvertisementvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - bgpadvertisements
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace }}
+ path: /validate-metallb-io-v1beta1-community
+ failurePolicy: {{ .Values.crds.validationFailurePolicy }}
+ name: communityvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - communities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace }}
+ path: /validate-metallb-io-v1beta1-bfdprofile
+ failurePolicy: {{ .Values.crds.validationFailurePolicy }}
+ name: bfdprofilevalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - DELETE
+ resources:
+ - bfdprofiles
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace }}
+ path: /validate-metallb-io-v1beta1-l2advertisement
+ failurePolicy: {{ .Values.crds.validationFailurePolicy }}
+ name: l2advertisementvalidationwebhook.metallb.io
+ rules:
+ - apiGroups:
+ - metallb.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - l2advertisements
+ sideEffects: None
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: metallb-webhook-service
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
+spec:
+ ports:
+ - port: 443
+ targetPort: 9443
+ selector:
+ {{- include "metallb.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: webhook-server-cert
+ namespace: {{ .Release.Namespace | quote }}
+ labels:
+ {{- include "metallb.labels" . | nindent 4 }}
diff --git a/charts/metallb/values.schema.json b/charts/metallb/values.schema.json
new file mode 100644
index 0000000..5a92e56
--- /dev/null
+++ b/charts/metallb/values.schema.json
@@ -0,0 +1,427 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Values",
+ "type": "object",
+ "definitions": {
+ "prometheusAlert": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "labels": {
+ "type": "object",
+ "additionalProperties": { "type": "string" }
+ }
+ },
+ "required": [ "enabled" ]
+ },
+ "probe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "failureThreshold": {
+ "type": "integer"
+ },
+ "initialDelaySeconds": {
+ "type": "integer"
+ },
+ "periodSeconds": {
+ "type": "integer"
+ },
+ "successThreshold": {
+ "type": "integer"
+ },
+ "timeoutSeconds": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "failureThreshold",
+ "initialDelaySeconds",
+ "periodSeconds",
+ "successThreshold",
+ "timeoutSeconds"
+ ]
+ },
+ "component": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "logLevel": {
+ "type": "string",
+ "enum": [ "all", "debug", "info", "warn", "error", "none" ]
+ },
+ "image": {
+ "type": "object",
+ "properties": {
+ "repository": {
+ "type": "string"
+ },
+ "tag": {
+ "anyOf": [
+ { "type": "string" },
+ { "type": "null" }
+ ]
+ },
+ "pullPolicy": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string",
+ "enum": [ "Always", "IfNotPresent", "Never" ]
+ }
+ ]
+ }
+ }
+ },
+ "serviceAccount": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "annotations": {
+ "type": "object"
+ }
+ }
+ },
+ "resources": {
+ "type": "object"
+ },
+ "nodeSelector": {
+ "type": "object"
+ },
+ "tolerations": {
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ },
+ "priorityClassName": {
+ "type":"string"
+ },
+ "runtimeClassName": {
+ "type":"string"
+ },
+ "affinity": {
+ "type": "object"
+ },
+ "podAnnotations": {
+ "type": "object"
+ },
+ "livenessProbe": {
+ "$ref": "#/definitions/probe"
+ },
+ "readinessProbe": {
+ "$ref": "#/definitions/probe"
+ }
+ },
+ "required": [
+ "image",
+ "serviceAccount"
+ ]
+ }
+ },
+ "properties": {
+ "imagePullSecrets": {
+ "description": "Secrets used for pulling images",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ },
+ "required": [ "name" ],
+ "additionalProperties": false
+ }
+ },
+ "nameOverride": {
+ "description": "Override chart name",
+ "type": "string"
+ },
+ "fullNameOverride": {
+ "description": "Override fully qualified app name",
+ "type": "string"
+ },
+ "configInLine": {
+ "description": "MetalLB configuration",
+ "type": "object"
+ },
+ "loadBalancerClass": {
+ "type":"string"
+ },
+ "rbac": {
+ "description": "RBAC configuration",
+ "type": "object",
+ "properties": {
+ "create": {
+ "description": "Enable RBAC",
+ "type": "boolean"
+ }
+ }
+ },
+ "prometheus": {
+ "description": "Prometheus monitoring config",
+ "type": "object",
+ "properties": {
+ "scrapeAnnotations": { "type": "boolean" },
+ "metricsPort": { "type": "integer" },
+ "secureMetricsPort": { "type": "integer" },
+ "rbacPrometheus": { "type": "boolean" },
+ "serviceAccount": { "type": "string" },
+ "namespace": { "type": "string" },
+ "rbacProxy": {
+ "description": "kube-rbac-proxy configuration",
+ "type": "object",
+ "properties": {
+ "repository": { "type": "string" },
+ "tag": { "type": "string" }
+ }
+ },
+ "podMonitor": {
+ "description": "Prometheus Operator PodMonitors",
+ "type": "object",
+ "properties": {
+ "enabled": { "type": "boolean" },
+ "additionalMonitors": { "type": "object" },
+ "jobLabel": { "type": "string" },
+ "interval": {
+ "anyOf": [
+ { "type": "integer" },
+ { "type": "null" }
+ ]
+ },
+ "metricRelabelings": {
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ },
+ "relabelings": {
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ }
+ }
+ },
+ "serviceMonitor": {
+ "description": "Prometheus Operator ServiceMonitors",
+ "type": "object",
+ "properties": {
+ "enabled": { "type": "boolean" },
+ "jobLabel": { "type": "string" },
+ "interval": {
+ "anyOf": [
+ { "type": "integer" },
+ { "type": "null" }
+ ]
+ },
+ "metricRelabelings": {
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ },
+ "relabelings": {
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ }
+ }
+ },
+ "prometheusRule": {
+ "description": "Prometheus Operator alertmanager alerts",
+ "type": "object",
+ "properties": {
+ "enabled": { "type": "boolean" },
+ "additionalMonitors": { "type": "object" },
+ "staleConfig": { "$ref": "#/definitions/prometheusAlert" },
+ "configNotLoaded": { "$ref": "#/definitions/prometheusAlert" },
+ "addressPoolExhausted": { "$ref": "#/definitions/prometheusAlert" },
+ "addressPoolUsage": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "thresholds": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "percent": {
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 100
+ },
+ "labels": {
+ "type": "object",
+ "additionalProperties": { "type": "string" }
+ }
+ },
+ "required": [ "percent" ]
+ }
+ }
+ },
+ "required": [ "enabled" ]
+ },
+ "bgpSessionDown": { "$ref": "#/definitions/prometheusAlert" },
+ "extraAlerts": {
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ }
+ },
+ "required": [
+ "enabled",
+ "staleConfig",
+ "configNotLoaded",
+ "addressPoolExhausted",
+ "addressPoolUsage",
+ "bgpSessionDown"
+ ]
+ }
+ },
+ "required": [ "podMonitor", "prometheusRule" ]
+ },
+ "speaker": {
+ "allOf": [
+ { "$ref": "#/definitions/component" },
+ { "description": "MetalLB Speaker",
+ "type": "object",
+ "properties": {
+ "tolerateMaster": {
+ "type": "boolean"
+ },
+ "memberlist": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "mlBindPort": {
+ "type": "integer"
+ },
+ "mlSecretKeyPath": {
+ "type": "string"
+ }
+ }
+ },
+ "excludeInterfaces": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ }
+ }
+ },
+ "updateStrategy": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string"
+ }
+ },
+ "required": [ "type" ]
+ },
+ "runtimeClassName": {
+ "type": "string"
+ },
+ "secretName": {
+ "type": "string"
+ },
+ "frr": {
+ "description": "Install FRR container in speaker deployment",
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "image": { "$ref": "#/definitions/component/properties/image" },
+ "metricsPort": { "type": "integer" },
+ "secureMetricsPort": { "type": "integer" },
+ "resources": { "type": "object" }
+ },
+ "required": [ "enabled" ]
+ },
+ "command" : {
+ "type": "string"
+ },
+ "reloader": {
+ "type": "object",
+ "properties": {
+ "resources": { "type": "object" }
+ }
+ },
+ "frrMetrics": {
+ "type": "object",
+ "properties": {
+ "resources": { "type": "object" }
+ }
+ }
+ },
+ "required": [ "tolerateMaster" ]
+ }
+ ]
+ },
+ "crds": {
+ "description": "CRD configuration",
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "description": "Enable CRDs",
+ "type": "boolean"
+ },
+ "validationFailurePolicy": {
+ "description": "Failure policy to use with validating webhooks",
+ "type": "string",
+ "enum": [ "Ignore", "Fail" ]
+ }
+ }
+ }
+ },
+ "controller": {
+ "allOf": [
+ { "$ref": "#/definitions/component" },
+ { "description": "MetalLB Controller",
+ "type": "object",
+ "properties": {
+ "strategy": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string"
+ }
+ },
+ "required": [ "type" ]
+ },
+ "command" : {
+ "type": "string"
+ },
+ "webhookMode" : {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ },
+ "required": [
+ "controller",
+ "speaker"
+ ]
+}
diff --git a/charts/metallb/values.yaml b/charts/metallb/values.yaml
new file mode 100644
index 0000000..be8cf11
--- /dev/null
+++ b/charts/metallb/values.yaml
@@ -0,0 +1,342 @@
+# Default values for metallb.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+loadBalancerClass: ""
+
+# To configure MetalLB, you must specify ONE of the following two
+# options.
+
+rbac:
+ # create specifies whether to install and use RBAC rules.
+ create: true
+
+prometheus:
+ # scrape annotations specifies whether to add Prometheus metric
+ # auto-collection annotations to pods. See
+ # https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
+ # for a corresponding Prometheus configuration. Alternatively, you
+ # may want to use the Prometheus Operator
+ # (https://github.com/coreos/prometheus-operator) for more powerful
+ # monitoring configuration. If you use the Prometheus operator, this
+ # can be left at false.
+ scrapeAnnotations: false
+
+ # port both controller and speaker will listen on for metrics
+ metricsPort: 7472
+
+ # if set, enables rbac proxy on the controller and speaker to expose
+ # the metrics via tls.
+ # secureMetricsPort: 9120
+
+ # the name of the secret to be mounted in the speaker pod
+ # to expose the metrics securely. If not present, a self signed
+ # certificate will be used.
+ speakerMetricsTLSSecret: ""
+
+ # the name of the secret to be mounted in the controller pod
+ # to expose the metrics securely. If not present, a self signed
+ # certificate will be used.
+ controllerMetricsTLSSecret: ""
+
+ # prometheus doesn't have the permission to scrape all namespaces so we give it permission to scrape metallb's one
+ rbacPrometheus: true
+
+ # the service account used by prometheus
+ # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
+ serviceAccount: ""
+
+ # the namespace where prometheus is deployed
+ # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
+ namespace: ""
+
+ # the image to be used for the kuberbacproxy container
+ rbacProxy:
+ repository: gcr.io/kubebuilder/kube-rbac-proxy
+ tag: v0.12.0
+ pullPolicy:
+
+ # Prometheus Operator PodMonitors
+ podMonitor:
+ # enable support for Prometheus Operator
+ enabled: false
+
+ # optional additional labels for podMonitors
+ additionalLabels: {}
+
+ # optional annotations for podMonitors
+ annotations: {}
+
+ # Job label for scrape target
+ jobLabel: "app.kubernetes.io/name"
+
+ # Scrape interval. If not set, the Prometheus default scrape interval is used.
+ interval:
+
+ # metric relabel configs to apply to samples before ingestion.
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ # relabel configs to apply to samples before ingestion.
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # target_label: nodename
+ # replacement: $1
+ # action: replace
+
+ # Prometheus Operator ServiceMonitors. To be used as an alternative
+ # to podMonitor, supports secure metrics.
+ serviceMonitor:
+ # enable support for Prometheus Operator
+ enabled: false
+
+ speaker:
+ # optional additional labels for the speaker serviceMonitor
+ additionalLabels: {}
+ # optional additional annotations for the speaker serviceMonitor
+ annotations: {}
+ # optional tls configuration for the speaker serviceMonitor, in case
+ # secure metrics are enabled.
+ tlsConfig:
+ insecureSkipVerify: true
+
+ controller:
+ # optional additional labels for the controller serviceMonitor
+ additionalLabels: {}
+ # optional additional annotations for the controller serviceMonitor
+ annotations: {}
+ # optional tls configuration for the controller serviceMonitor, in case
+ # secure metrics are enabled.
+ tlsConfig:
+ insecureSkipVerify: true
+
+ # Job label for scrape target
+ jobLabel: "app.kubernetes.io/name"
+
+ # Scrape interval. If not set, the Prometheus default scrape interval is used.
+ interval:
+
+ # metric relabel configs to apply to samples before ingestion.
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ # relabel configs to apply to samples before ingestion.
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # target_label: nodename
+ # replacement: $1
+ # action: replace
+
+ # Prometheus Operator alertmanager alerts
+ prometheusRule:
+ # enable alertmanager alerts
+ enabled: false
+
+ # optional additional labels for prometheusRules
+ additionalLabels: {}
+
+ # optional annotations for prometheusRules
+ annotations: {}
+
+ # MetalLBStaleConfig
+ staleConfig:
+ enabled: true
+ labels:
+ severity: warning
+
+ # MetalLBConfigNotLoaded
+ configNotLoaded:
+ enabled: true
+ labels:
+ severity: warning
+
+ # MetalLBAddressPoolExhausted
+ addressPoolExhausted:
+ enabled: true
+ labels:
+ severity: alert
+
+ addressPoolUsage:
+ enabled: true
+ thresholds:
+ - percent: 75
+ labels:
+ severity: warning
+ - percent: 85
+ labels:
+ severity: warning
+ - percent: 95
+ labels:
+ severity: alert
+
+ # MetalLBBGPSessionDown
+ bgpSessionDown:
+ enabled: true
+ labels:
+ severity: alert
+
+ extraAlerts: []
+
+# controller contains configuration specific to the MetalLB cluster
+# controller.
+controller:
+ enabled: true
+ # -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
+ logLevel: info
+ # command: /controller
+ # webhookMode: enabled
+ image:
+ repository: quay.io/metallb/controller
+ tag:
+ pullPolicy:
+ ## @param controller.updateStrategy.type Metallb controller deployment strategy type.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+ ## e.g:
+ ## strategy:
+ ## type: RollingUpdate
+ ## rollingUpdate:
+ ## maxSurge: 25%
+ ## maxUnavailable: 25%
+ ##
+ strategy:
+ type: RollingUpdate
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use. If not set and create is
+ # true, a name is generated using the fullname template
+ name: ""
+ annotations: {}
+ securityContext:
+ runAsNonRoot: true
+ # nobody
+ runAsUser: 65534
+ fsGroup: 65534
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+ nodeSelector: {}
+ tolerations: []
+ priorityClassName: ""
+ runtimeClassName: ""
+ affinity: {}
+ podAnnotations: {}
+ labels: {}
+ livenessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+
+# speaker contains configuration specific to the MetalLB speaker
+# daemonset.
+speaker:
+ enabled: true
+ # command: /speaker
+ # -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
+ logLevel: info
+ tolerateMaster: true
+ memberlist:
+ enabled: true
+ mlBindPort: 7946
+ mlSecretKeyPath: "/etc/ml_secret_key"
+ excludeInterfaces:
+ enabled: true
+ image:
+ repository: quay.io/metallb/speaker
+ tag:
+ pullPolicy:
+ ## @param speaker.updateStrategy.type Speaker daemonset strategy type
+ ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
+ ##
+ updateStrategy:
+ ## StrategyType
+ ## Can be set to RollingUpdate or OnDelete
+ ##
+ type: RollingUpdate
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use. If not set and create is
+ # true, a name is generated using the fullname template
+ name: ""
+ annotations: {}
+ ## Defines a secret name for the controller to generate a memberlist encryption secret
+ ## By default secretName: {{ "metallb.fullname" }}-memberlist
+ ##
+ # secretName:
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+ nodeSelector: {}
+ tolerations: []
+ priorityClassName: ""
+ affinity: {}
+ ## Selects which runtime class will be used by the pod.
+ runtimeClassName: ""
+ podAnnotations: {}
+ labels: {}
+ livenessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ startupProbe:
+ enabled: true
+ failureThreshold: 30
+ periodSeconds: 5
+ # frr contains configuration specific to the MetalLB FRR container,
+ # for speaker running alongside FRR.
+ frr:
+ enabled: true
+ image:
+ repository: quay.io/frrouting/frr
+ tag: 8.5.2
+ pullPolicy:
+ metricsPort: 7473
+ resources: {}
+
+ # if set, enables a rbac proxy sidecar container on the speaker to
+ # expose the frr metrics via tls.
+ # secureMetricsPort: 9121
+
+ reloader:
+ resources: {}
+
+ frrMetrics:
+ resources: {}
+
+crds:
+ enabled: true
+ validationFailurePolicy: Fail
diff --git a/charts/namespace/.helmignore b/charts/namespace/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/namespace/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/namespace/Chart.yaml b/charts/namespace/Chart.yaml
new file mode 100644
index 0000000..9dd3c96
--- /dev/null
+++ b/charts/namespace/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: namespace
+description: A Helm chart for creating PCloud namespaces
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/namespace/templates/namespace.yaml b/charts/namespace/templates/namespace.yaml
new file mode 100644
index 0000000..58d5d46
--- /dev/null
+++ b/charts/namespace/templates/namespace.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.namespace }}
+ labels:
+ {{- range .Values.labels }}
+ {{ . }}
+ {{- end }}
+ # annotations:
+ # helm.sh/resource-policy: keep
+
diff --git a/charts/namespace/values.yaml b/charts/namespace/values.yaml
new file mode 100644
index 0000000..4c412e6
--- /dev/null
+++ b/charts/namespace/values.yaml
@@ -0,0 +1,4 @@
+namespace: example
+labels:
+- foo
+- bar
diff --git a/charts/namespaces/.helmignore b/charts/namespaces/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/namespaces/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/namespaces/Chart.yaml b/charts/namespaces/Chart.yaml
new file mode 100644
index 0000000..9dd3c96
--- /dev/null
+++ b/charts/namespaces/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: namespaces
+description: A Helm chart for creating PCloud namespaces
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/namespaces/templates/namespace.yaml b/charts/namespaces/templates/namespace.yaml
new file mode 100644
index 0000000..c7dfeb8
--- /dev/null
+++ b/charts/namespaces/templates/namespace.yaml
@@ -0,0 +1,15 @@
+{{ $prefix := .Values.namespacePrefix }}
+{{ $id := .Values.pcloudInstanceId }}
+{{ range .Values.namespaces }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ $prefix }}{{ . }}
+ {{ if $id }}
+ labels:
+ pcloud-instance-id: {{ $id }}
+ {{ end }}
+ annotations:
+ helm.sh/resource-policy: keep
+---
+{{ end }}
diff --git a/charts/namespaces/values.yaml b/charts/namespaces/values.yaml
new file mode 100644
index 0000000..9cb3886
--- /dev/null
+++ b/charts/namespaces/values.yaml
@@ -0,0 +1,5 @@
+pcloudInstanceId: example
+namespacePrefix: example-
+namespaces:
+- foo
+- bar
diff --git a/charts/nebula/.helmignore b/charts/nebula/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/nebula/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/nebula/Chart.yaml b/charts/nebula/Chart.yaml
new file mode 100644
index 0000000..edddb74
--- /dev/null
+++ b/charts/nebula/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: nebula
+description: A Helm chart for Nebula controller
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/nebula/crds/nebula-ca.yaml b/charts/nebula/crds/nebula-ca.yaml
new file mode 100644
index 0000000..c8de194
--- /dev/null
+++ b/charts/nebula/crds/nebula-ca.yaml
@@ -0,0 +1,83 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: nebulacas.lekva.me
+spec:
+ group: lekva.me
+ scope: Namespaced
+ names:
+ kind: NebulaCA
+ listKind: NebulaCAList
+ plural: nebulacas
+ singular: nebulaca
+ shortNames:
+ - nca
+ - ncas
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ secretName:
+ type: string
+ status:
+ type: object
+ properties:
+ state:
+ type: string
+ message:
+ type: string
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: nebulanodes.lekva.me # NOTE(review): duplicates the NebulaNode CRD in crds/nebula-node.yaml (which also defines encPubKey); keep only one copy
+spec:
+ group: lekva.me
+ scope: Namespaced
+ names:
+ kind: NebulaNode
+ listKind: NebulaNodeList
+ plural: nebulanodes
+ singular: nebulanode
+ shortNames:
+ - nnode
+ - nnodes
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ caName:
+ type: string
+ caNamespace:
+ type: string
+ ipCidr:
+ type: string
+ pubKey:
+ type: string
+ secretName:
+ type: string
+ status:
+ type: object
+ properties:
+ state:
+ type: string
+ message:
+ type: string
diff --git a/charts/nebula/crds/nebula-node.yaml b/charts/nebula/crds/nebula-node.yaml
new file mode 100644
index 0000000..ddb79de
--- /dev/null
+++ b/charts/nebula/crds/nebula-node.yaml
@@ -0,0 +1,47 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: nebulanodes.lekva.me
+spec:
+ group: lekva.me
+ scope: Namespaced
+ names:
+ kind: NebulaNode
+ listKind: NebulaNodeList
+ plural: nebulanodes
+ singular: nebulanode
+ shortNames:
+ - nnode
+ - nnodes
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ caName:
+ type: string
+ caNamespace:
+ type: string
+ ipCidr:
+ type: string
+ pubKey:
+ type: string
+ encPubKey:
+ type: string
+ secretName:
+ type: string
+ status:
+ type: object
+ properties:
+ state:
+ type: string
+ message:
+ type: string
diff --git a/charts/nebula/templates/api.yaml b/charts/nebula/templates/api.yaml
new file mode 100644
index 0000000..a1ec876
--- /dev/null
+++ b/charts/nebula/templates/api.yaml
@@ -0,0 +1,53 @@
+# apiVersion: v1
+# kind: Service
+# metadata:
+# name: nebula-api
+# namespace: {{ .Release.Namespace }}
+# spec:
+# type: LoadBalancer
+# selector:
+# app: nebula-api
+# ports:
+# - name: http
+# port: 80
+# targetPort: http
+# protocol: TCP
+# ---
+# apiVersion: apps/v1
+# kind: Deployment
+# metadata:
+# name: nebula-api
+# namespace: {{ .Release.Namespace }}
+# spec:
+# selector:
+# matchLabels:
+# app: nebula-api
+# replicas: 1
+# template:
+# metadata:
+# labels:
+# app: nebula-api
+# spec:
+# containers:
+# - name: web
+# image: {{ .Values.manage.image.repository }}:{{ .Values.manage.image.tag }}
+# imagePullPolicy: {{ .Values.manage.image.pullPolicy }}
+# ports:
+# - name: http
+# containerPort: 8080
+# protocol: TCP
+# command:
+# - nebula-api
+# - --port=8080
+# resources:
+# requests:
+# memory: "10Mi"
+# cpu: "10m"
+# limits:
+# memory: "20Mi"
+# cpu: "100m"
+# tolerations:
+# - key: "pcloud"
+# operator: "Equal"
+# value: "role"
+# effect: "NoSchedule"
diff --git a/charts/nebula/templates/controller.yaml b/charts/nebula/templates/controller.yaml
new file mode 100644
index 0000000..495539d
--- /dev/null
+++ b/charts/nebula/templates/controller.yaml
@@ -0,0 +1,69 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nebula-controller
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: nebula-controller
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: nebula-controller
+ spec:
+ containers:
+ - name: controller
+ image: {{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}
+ imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
+ command:
+ - nebula-controller
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Release.Namespace }}-nebula-controller
+ namespace: {{ .Release.Namespace }} # NOTE(review): metadata.namespace is ignored on cluster-scoped ClusterRole
+rules:
+- apiGroups:
+ - "lekva.me"
+ resources:
+ - nebulacas
+ - nebulacas/status
+ - nebulanodes
+ - nebulanodes/status
+ verbs:
+ - list
+ - get
+ - create
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - list
+ - get
+ - create
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Release.Namespace }}-nebula-controller
+ namespace: {{ .Release.Namespace }} # NOTE(review): metadata.namespace is ignored on cluster-scoped ClusterRoleBinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Release.Namespace }}-nebula-controller
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
diff --git a/charts/nebula/values.yaml b/charts/nebula/values.yaml
new file mode 100644
index 0000000..7aa742f
--- /dev/null
+++ b/charts/nebula/values.yaml
@@ -0,0 +1,10 @@
+controller:
+ image:
+ repository: giolekva/nebula-controller
+ tag: latest
+ pullPolicy: Always
+manage:
+ image:
+ repository: giolekva/nebula-api
+ tag: latest
+ pullPolicy: Always
diff --git a/charts/oauth2-client/.helmignore b/charts/oauth2-client/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/oauth2-client/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/oauth2-client/Chart.yaml b/charts/oauth2-client/Chart.yaml
new file mode 100644
index 0000000..535f1bc
--- /dev/null
+++ b/charts/oauth2-client/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: oauth2-client
+description: A Helm chart for creating PCloud OAuth2 clients
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/oauth2-client/templates/oauth2-client.yaml b/charts/oauth2-client/templates/oauth2-client.yaml
new file mode 100644
index 0000000..81b7d96
--- /dev/null
+++ b/charts/oauth2-client/templates/oauth2-client.yaml
@@ -0,0 +1,17 @@
+apiVersion: hydra.ory.sh/v1alpha1
+kind: OAuth2Client
+metadata:
+ name: {{ .Values.name }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ .Values.secretName }}
+ grantTypes: {{ .Values.grantTypes }}
+ responseTypes: {{ .Values.responseTypes }}
+ scope: {{ .Values.scope }}
+ redirectUris: {{ .Values.redirectUris }}
+ hydraAdmin:
+ endpoint: /admin/clients
+ forwardedProto: https
+ port: 80
+ url: {{ .Values.hydraAdmin }}
+ tokenEndpointAuthMethod: {{ .Values.tokenEndpointAuthMethod }}
diff --git a/charts/oauth2-client/values.yaml b/charts/oauth2-client/values.yaml
new file mode 100644
index 0000000..d9df00e
--- /dev/null
+++ b/charts/oauth2-client/values.yaml
@@ -0,0 +1,8 @@
+name: oauth2-client
+secretName: oauth2-credentials
+grantTypes: []
+responseTypes: []
+scope: ""
+redirectUris: []
+hydraAdmin: ""
+tokenEndpointAuthMethod: ""
diff --git a/charts/openproject/.helmignore b/charts/openproject/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/openproject/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/openproject/Chart.lock b/charts/openproject/Chart.lock
new file mode 100644
index 0000000..ca4abbb
--- /dev/null
+++ b/charts/openproject/Chart.lock
@@ -0,0 +1,12 @@
+dependencies:
+- name: postgresql
+ repository: https://charts.bitnami.com/bitnami
+ version: 12.12.10
+- name: memcached
+ repository: https://charts.bitnami.com/bitnami
+ version: 6.14.0
+- name: common
+ repository: https://charts.bitnami.com/bitnami
+ version: 2.19.1
+digest: sha256:0fde10adb357d4399cdda67e384bea2e34dcc16121ffa0e2d8d4dadd184346e1
+generated: "2024-03-28T12:35:19.4267304Z"
diff --git a/charts/openproject/Chart.yaml b/charts/openproject/Chart.yaml
new file mode 100644
index 0000000..09edf7b
--- /dev/null
+++ b/charts/openproject/Chart.yaml
@@ -0,0 +1,35 @@
+annotations:
+ artifacthub.io/license: GPL-3.0-only
+ artifacthub.io/links: |
+ - name: Homepage
+ url: https://www.openproject.org/
+ - name: OpenProject Community
+ url: https://community.openproject.org
+ - name: support
+ url: https://www.openproject.org/docs/development/report-a-bug/
+ artifacthub.io/signKey: |
+ fingerprint: CB1CA0488A75B7471EA1B087CF56DD6A0AE260E5
+ url: https://keys.openpgp.org/vks/v1/by-fingerprint/CB1CA0488A75B7471EA1B087CF56DD6A0AE260E5
+apiVersion: v2
+appVersion: "13"
+dependencies:
+- condition: postgresql.bundled
+ name: postgresql
+ repository: https://charts.bitnami.com/bitnami
+ version: ^12.1.6
+- condition: memcached.bundled
+ name: memcached
+ repository: https://charts.bitnami.com/bitnami
+ version: ^6.3.2
+- name: common
+ repository: https://charts.bitnami.com/bitnami
+ version: ^2.2.2
+description: A Helm chart for running OpenProject via Kubernetes
+home: https://www.openproject.org/
+icon: https://charts.openproject.org/logo.svg
+maintainers:
+- name: OpenProject
+ url: https://github.com/opf/helm-charts
+name: openproject
+type: application
+version: 5.1.3
diff --git a/charts/openproject/README.md b/charts/openproject/README.md
new file mode 100644
index 0000000..7d5d8b0
--- /dev/null
+++ b/charts/openproject/README.md
@@ -0,0 +1,365 @@
+# Helm chart for OpenProject
+
+This is the chart for OpenProject itself. It bootstraps an OpenProject instance, optionally with a PostgreSQL database and Memcached.
+
+## Prerequisites
+
+- Kubernetes 1.16+
+- Helm 3.0.0+
+- PV provisioner support in the underlying infrastructure
+
+
+## Helm chart Provenance and Integrity
+
+We sign our chart using the [Helm Provenance and Integrity](https://helm.sh/docs/topics/provenance/) functionality. You can find the used public key here
+
+- https://github.com/opf/helm-charts/blob/main/signing.key
+- https://keys.openpgp.org/vks/v1/by-fingerprint/CB1CA0488A75B7471EA1B087CF56DD6A0AE260E5
+
+We recommend using the [Helm GnuPG plugin](https://github.com/technosophos/helm-gpg). With it you can manually verify the signature like this:
+
+```bash
+helm repo add openproject https://charts.openproject.org
+helm fetch --prov openproject/openproject
+helm gpg verify openproject-*.tgz
+```
+
+## Installation
+
+### Quick start
+
+```shell
+helm repo add openproject https://charts.openproject.org
+helm upgrade --create-namespace --namespace openproject --install my-openproject openproject/openproject
+```
+
+You can also install the chart with the release name `my-openproject` in its own namespace like this:
+
+```shell
+helm upgrade --create-namespace --namespace openproject --install my-openproject openproject/openproject
+```
+
+The namespace is optional, but we recommend it as it does make it easier to manage the resources created for OpenProject.
+
+## Configuration
+
+Configuration of the chart takes place through defined values, and a catch-all entry `environment` to provide all possible variables through ENV that OpenProject supports. To get more information about the possible values, please see [our guide on environment variables](https://www.openproject.org/docs/installation-and-operations/configuration/environment/).
+
+
+
+### Available OpenProject specific helm values
+
+We try to map the most common options to chart values directly for ease of use. The most common ones are listed here, feel free to extend available values [through a pull request](https://github.com/opf/helm-charts/).
+
+
+
+**OpenProject image and version**
+
+By default, the helm chart will target the latest stable major release. You can define a custom [supported docker tag](https://hub.docker.com/r/openproject/community/) using `image.tag`. Override container registry and repository using `image.registry` and `image.repository`, respectively.
+
+Please make sure to use the `-slim` variant of OpenProject, as the all-in-one container is adding unnecessary services and will not work as expected with default options such as operating as a non-root user.
+
+
+
+**HTTPS mode**
+
+Regardless of the TLS mode of ingress, OpenProject needs to be told whether it's expected to run and return HTTPS responses (or generate correct links in mails, background jobs, etc.). If you're not running https, then set `openproject.https=false`.
+
+
+
+**Seed locale** (13.0+)
+
+By default, demo data and global names for types, statuses, etc. will be in English. If you wish to set a custom locale, set `openproject.seed_locale=XX`, where XX can be a two-character ISO code. For currently supported values, see the `OPENPROJECT_AVAILABLE__LANGUAGES` default value in the [environment guide](https://www.openproject.org/docs/installation-and-operations/configuration/environment/).
+
+
+
+**Admin user** (13.0+)
+
+By default, OpenProject generates an admin user with password `admin` which is required to change after first interactive login.
+If you're operating an automated deployment with fresh databases for testing, this default approach might not be desirable.
+
+You can customize the password as well as name, email, and whether a password change is enforced on first login with these variables:
+
+```ruby
+openproject.admin_user.password="my-secure-password"
+openproject.admin_user.password_reset="false"
+openproject.admin_user.name="Firstname Lastname"
+openproject.admin_user.mail="admin@example.com"
+```
+
+
+
+### ReadWriteMany volumes
+
+By default and when using filesystem-based attachments, OpenProject requires the Kubernetes cluster to support `ReadWriteMany` (rwx) volumes. This is due to the fact that multiple container instances need access to write to the attachment storage.
+
+To avoid using ReadWriteMany, you will need to configure an S3 compatible object storage instead which is shown in the [advanced configuration guide](https://www.openproject.org/docs/installation-and-operations/configuration/#attachments-storage).
+
+```
+persistence:
+ enabled: false
+
+s3:
+ enabled: true
+ accessKeyId:
+ # host:
+ # port:
+```
+
+
+
+### Updating the configuration
+
+The OpenProject configuration can be changed through environment variables.
+You can use `helm upgrade` to set individual values.
+
+For instance:
+
+```shell
+helm upgrade --reuse-values --namespace openproject my-openproject --set environment.OPENPROJECT_IMPRESSUM__LINK=https://www.openproject.org/legal/imprint/ --set environment.OPENPROJECT_APP__TITLE='My OpenProject'
+```
+
+Find out more about the [configuration through environment variables](https://www.openproject.org/docs/installation-and-operations/configuration/environment/) section.
+
+
+
+## Uninstalling the Chart
+
+To uninstall the release with the name my-openproject do the following:
+
+```shell
+helm uninstall --namespace openproject my-openproject
+```
+
+
+
+> **Note**: This will not remove the persistent volumes created while installing.
+> The easiest way to ensure all PVCs are deleted as well is to delete the openproject namespace
+> (`kubectl delete namespace openproject`). If you installed OpenProject into the default
+> namespace, you can delete the volumes manually one by one.
+
+
+
+## Troubleshooting
+
+### Web deployment stuck in `CrashLoopBackoff`
+
+Describing the pod may yield an error like the following:
+
+```
+65s) kubelet Error: failed to start container "openproject": Error response from daemon: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: error setting cgroup config for procHooks process: failed to write "400000": write /sys/fs/cgroup/cpu,cpuacct/kubepods/burstable/pod990fa25e-dbf0-4fb7-9b31-9d7106473813/openproject/cpu.cfs_quota_us: invalid argument: unknown
+```
+
+This can happen when using **minikube**. By default, it initialises the cluster with 2 CPUs only.
+
+Either increase the cluster's resources to have at least 4 CPUs or install the OpenProject helm chart with a reduced CPU limit by adding the following option to the install command:
+
+```shell
+--set resources.limits.cpu=2
+```
+
+## Development
+
+To install or update from this directory run the following command.
+
+```bash
+bin/install-dev
+```
+
+This will install the chart with `--set develop=true` which is recommended
+on local clusters such as **minikube** or **kind**.
+
+This will also set `OPENPROJECT_HTTPS` to false so no TLS certificate is required
+to access it.
+
+You can set other options just like when installing via `--set`
+(e.g. `bin/install-dev --set persistence.enabled=false`).
+
+### Debugging
+
+Changes to the chart can be debugged using the following.
+
+```bash
+bin/debug
+```
+
+This will try to render the templates and show any errors.
+You can set values just like when installing via `--set`
+(e.g. `bin/debug --set persistence.enabled=false`).
+
+## TLS
+
+Create a TLS certificate, e.g. using [mkcert](https://github.com/FiloSottile/mkcert).
+
+```
+mkcert helm-example.openproject-dev.com
+```
+
+Create the tls secret in kubernetes.
+
+```
+kubectl -n openproject create secret tls openproject-tls \
+ --key="helm-example.openproject-dev.com-key.pem" \
+ --cert="helm-example.openproject-dev.com.pem"
+```
+
+Set the tls secret value during installation or an upgrade by adding the following.
+
+```
+--set ingress.tls.enabled=true --set tls.secretName=openproject-tls
+```
+
+### Root CA
+
+If you want to add your own root CA for outgoing TLS connection, do the following.
+
+1. Put the certificate into a config map.
+
+```
+kubectl -n openproject-dev create configmap ca-pemstore --from-file=/path/to/rootCA.pem
+```
+
+To make OpenProject use this CA for outgoing TLS connection, set the following options.
+
+```
+ --set egress.tls.rootCA.configMap=ca-pemstore \
+ --set egress.tls.rootCA.fileName=rootCA.pem
+```
+
+## Secrets
+
+There are various sensitive credentials used by the chart.
+While they can be provided directly in the values (e.g. `--set postgresql.auth.password`),
+it is recommended to store them in secrets instead.
+
+You can create a new secret like this:
+
+```
+kubectl -n openproject create secret generic <name>
+```
+
+You can then edit the secret to add the credentials via the following.
+
+```
+kubectl -n openproject edit secret <name>
+```
+
+The newly created secret will look something like this:
+
+```
+apiVersion: v1
+kind: Secret
+metadata:
+ creationTimestamp: "2024-01-10T09:36:09Z"
+ name: <name>
+ namespace: openproject
+ resourceVersion: "1074377"
+ uid: ff6538cd-f8cb-418f-8cee-bd1e20d96d24
+type: Opaque
+```
+
+To add the actual content, you can simply add `stringData:` to the end of it and save it.
+
+The keys which are looked up inside the secret data can be changed from their defaults in the values as well. This is the same in all cases where next to `existingSecret` you can also set `secretKeys`.
+
+In the following sections we give examples for what this may look like using the default keys for the credentials used by OpenProject.
+
+### PostgreSQL
+
+```yaml
+stringData:
+ postgres-password: postgresPassword
+ password: userPassword
+```
+
+If you have an existing secret where the keys are not `postgres-password` and `password`, you can customize the used keys as mentioned above.
+
+For instance:
+
+```bash
+helm upgrade --create-namespace --namespace openproject --install openproject \
+ --set postgresql.auth.existingSecret=mysecret \
+ --set postgresql.auth.secretKeys.adminPasswordKey=adminpw \
+ --set postgresql.auth.secretKeys.userPasswordKey=userpw
+```
+
+This can be customized for the credentials in the following sections too in the same fashion.
+You can look up the respective options in the [`values.yaml`](./values.yaml) file.
+
+#### Default passwords
+
+If you provide neither an existing secret nor passwords directly in the `values.yaml` file,
+the postgres chart will generate a secret automatically.
+
+This secret will contain both the user and admin passwords.
+You can print the base64 encoded passwords as follows.
+
+```
+kubectl get secret -n <namespace> openproject-postgresql -o yaml | grep password
+```
+
+### OIDC (OpenID Connect)
+
+```yaml
+stringData:
+ clientId: 7c6cc104-1d07-4a9f-b3fb-017da8577cec
+ clientSecret: Sf78Q~H14O7F2_EOS4NsLoxu-ayOm42i~MljMb44
+```
+
+
+
+**Sealed secrets**
+
+```bash
+kubectl create secret generic openproject-oidc-secret-sealed --from-literal=OPENPROJECT_OPENID__CONNECT_PROVIDERHERE_IDENTIFIER=xxxxx --from-literal=OPENPROJECT_OPENID__CONNECT_PROVIDERHERE_SECRET=xxxxx --dry-run=client -o yaml | kubeseal ...
+```
+
+Set `openproject.oidc.extraOidcSealedSecret="openproject-oidc-secret-sealed"` in your values.
+
+### S3
+
+```yaml
+stringData:
+ accessKeyId: AKIAXDF2JNZRBFQIRTKA
+ secretAccessKey: zwH7t0H3bJQf/TvlQpE7/Y59k9hD+nYNRlKUBpuq
+```
+
+## OpenShift
+
+For OpenProject to work in OpenShift without further adjustments,
+you need to use the following pod and container security context.
+
+```
+podSecurityContext:
+ supplementalGroups: [1000]
+ fsGroup: null
+
+containerSecurityContext:
+ runAsUser: null
+ runAsGroup: null
+```
+
+By default OpenProject requests `fsGroup: 1000` in the pod security context, and also `1000` for both `runAsUser` and `runAsGroup` in the container security context.
+You have to allow this using a custom SCC (Security Context Constraint) in the cluster. In this case you do not have to adjust the security contexts.
+But the easiest way is the use of the security contexts as shown above.
+
+Due to the default restrictions in OpenShift there may also be issues running
+PostgreSQL and memcached. Again, you may have to create an SCC to fix this
+or adjust the policies in the subcharts accordingly.
+
+Assuming no further options for both, simply disabling the security context values to use the default works as well.
+
+```
+postgresql:
+ primary:
+ containerSecurityContext:
+ enabled: false
+ podSecurityContext:
+ enabled: false
+
+memcached:
+ containerSecurityContext:
+ enabled: false
+ podSecurityContext:
+ enabled: false
+```
diff --git a/charts/openproject/RELEASE-NOTES.md b/charts/openproject/RELEASE-NOTES.md
new file mode 100644
index 0000000..d5da084
--- /dev/null
+++ b/charts/openproject/RELEASE-NOTES.md
@@ -0,0 +1,6 @@
+
+
+### Patch Changes
+
+- 35aba8b: fix(secret_s3): add quote around port
+
diff --git a/charts/openproject/bin/debug b/charts/openproject/bin/debug
new file mode 100644
index 0000000..0190310
--- /dev/null
+++ b/charts/openproject/bin/debug
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Outputs the generated helm configurations after templating.
+
+yaml_output=/tmp/op-hc-yaml-output.txt
+error_output=/tmp/op-hc-error-output.txt
+section_output=/tmp/op-hc-section-output.yml
+vimrc=/tmp/op-hc-vim-rc
+
+rm $yaml_output $error_output $section_output $vimrc &>/dev/null
+
+helm template --debug "$@" . 1> $yaml_output 2> $error_output
+
+if [ $? -gt 0 ]; then
+ section=`cat $error_output | grep 'Error: YAML parse error on' | cut -d: -f2 | cut -d' ' -f6-`
+
+ if [ -n "$section" ]; then
+ cat $yaml_output | sed -e "0,/\# Source: ${section//\//\\/}/d" | tail -n+2 | sed -e '/---/,$d' > $section_output
+
+ line=`cat $error_output | grep line | head -n1 | perl -nle 'm/line (\d+)/; print $1'`
+
+ if [ -n "$line" ]; then
+ echo "autocmd VimEnter * echo '`cat $error_output | grep line | head -n1`'" > $vimrc
+ vim +$line -u $vimrc $section_output
+ else
+ echo
+ echo "Template error: "
+ echo
+ echo ---
+ cat $section_output
+ cat $error_output
+ fi
+ else
+ echo
+ echo "Template error: "
+ echo
+ echo ---
+ cat $yaml_output
+ cat $error_output
+ fi
+else
+ cat $yaml_output
+
+ echo
+ echo "Syntax ok"
+fi
diff --git a/charts/openproject/bin/install-dev b/charts/openproject/bin/install-dev
new file mode 100644
index 0000000..b38d542
--- /dev/null
+++ b/charts/openproject/bin/install-dev
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# Install OpenProject in development mode, that is without https and allowing writes
+# to the container file system.
+
+helm upgrade --create-namespace --namespace openproject --install openproject --set develop=true "$@" .
diff --git a/charts/openproject/charts/common/.helmignore b/charts/openproject/charts/common/.helmignore
new file mode 100644
index 0000000..7c7c21d
--- /dev/null
+++ b/charts/openproject/charts/common/.helmignore
@@ -0,0 +1,24 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+# img folder
+img/
diff --git a/charts/openproject/charts/common/Chart.yaml b/charts/openproject/charts/common/Chart.yaml
new file mode 100644
index 0000000..8d0e546
--- /dev/null
+++ b/charts/openproject/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+ category: Infrastructure
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.19.1
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+ This chart is not deployable by itself.
+home: https://bitnami.com
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: VMware, Inc.
+ url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+type: library
+version: 2.19.1
diff --git a/charts/openproject/charts/common/README.md b/charts/openproject/charts/common/README.md
new file mode 100644
index 0000000..0d01a1e
--- /dev/null
+++ b/charts/openproject/charts/common/README.md
@@ -0,0 +1,235 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+ - name: common
+ version: 2.x.x
+ repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.names.fullname" . }}
+data:
+ myvalue: "Hello World"
+```
+
+Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Introduction
+
+This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.23+
+- Helm 3.8.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+ type: string
+ description: Docker registry where the image is located
+ example: docker.io
+
+repository:
+ type: string
+ description: Repository and image name
+ example: bitnami/nginx
+
+tag:
+ type: string
+ description: image tag
+ example: 1.16.1-debian-10-r63
+
+pullPolicy:
+ type: string
+ description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+ type: array
+ items:
+ type: string
+ description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+ type: boolean
+ description: Set to true if you would like to see extra information on logs
+ example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+ type: boolean
+ description: Whether enable persistence.
+ example: true
+
+storageClass:
+ type: string
+ description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+ example: "-"
+
+accessMode:
+ type: string
+ description: Access mode for the Persistent Volume Storage.
+ example: ReadWriteOnce
+
+size:
+ type: string
+ description: Size the Persistent Volume Storage.
+ example: 8Gi
+
+path:
+ type: string
+ description: Path to be persisted.
+ example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+ type: string
+ description: Name of the existing secret.
+ example: mySecret
+keyMapping:
+ description: Mapping between the expected key name and the name of the key in the existing secret.
+ type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+# password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+ password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+ env:
+ - name: PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+ key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+ password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+ export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+ export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright © 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/charts/openproject/charts/common/templates/_affinities.tpl b/charts/openproject/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..e85b1df
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_affinities.tpl
@@ -0,0 +1,139 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" . -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $customLabels := default (dict) .customLabels -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: 1
+ {{- range $extraPodAffinityTerms }}
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := .extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: {{ .weight | default 1 -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $customLabels := default (dict) .customLabels -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ {{- range $extraPodAffinityTerms }}
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := .extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_capabilities.tpl b/charts/openproject/charts/common/templates/_capabilities.tpl
new file mode 100644
index 0000000..115674a
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_capabilities.tpl
@@ -0,0 +1,229 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+ {{- if .Values.global.kubeVersion }}
+ {{- .Values.global.kubeVersion -}}
+ {{- else }}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+ {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for daemonset.
+*/}}
+{{- define "common.capabilities.daemonset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Vertical Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.vpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if PodSecurityPolicy is supported
+*/}}
+{{- define "common.capabilities.psp.supported" -}}
+{{- if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if AdmissionConfiguration is supported
+*/}}
+{{- define "common.capabilities.admissionConfiguration.supported" -}}
+{{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for AdmissionConfiguration.
+*/}}
+{{- define "common.capabilities.admissionConfiguration.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiserver.config.k8s.io/v1alpha1" -}}
+{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiserver.config.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiserver.config.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for PodSecurityConfiguration.
+*/}}
+{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "pod-security.admission.config.k8s.io/v1alpha1" -}}
+{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "pod-security.admission.config.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "pod-security.admission.config.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_compatibility.tpl b/charts/openproject/charts/common/templates/_compatibility.tpl
new file mode 100644
index 0000000..17665d5
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_compatibility.tpl
@@ -0,0 +1,39 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return true if the detected platform is Openshift
+Usage:
+{{- include "common.compatibility.isOpenshift" . -}}
+*/}}
+{{- define "common.compatibility.isOpenshift" -}}
+{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" -}}
+{{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC
+Usage:
+{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) -}}
+*/}}
+{{- define "common.compatibility.renderSecurityContext" -}}
+{{- $adaptedContext := .secContext -}}
+{{- if .context.Values.global.compatibility -}}
+ {{- if .context.Values.global.compatibility.openshift -}}
+ {{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}}
+ {{/* Remove incompatible user/group values that do not work in Openshift out of the box */}}
+ {{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}}
+ {{- if not .secContext.seLinuxOptions -}}
+ {{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}}
+ {{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+{{- omit $adaptedContext "enabled" | toYaml -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_errors.tpl b/charts/openproject/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..07ded6f
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_errors.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+ - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error.
+ - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+ {{- $validationErrors := join "" .validationErrors -}}
+ {{- if and $validationErrors .context.Release.IsUpgrade -}}
+ {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+ {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+ {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+ {{- $errorString = print $errorString "\n%s" -}}
+ {{- printf $errorString $validationErrors | fail -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_images.tpl b/charts/openproject/charts/common/templates/_images.tpl
new file mode 100644
index 0000000..1bcb779
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_images.tpl
@@ -0,0 +1,117 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+ {{- if .global.imageRegistry }}
+ {{- $registryName = .global.imageRegistry -}}
+ {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+ {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+ {{- printf "%s%s%s" $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+ {{- $pullSecrets := list }}
+
+ {{- if .global }}
+ {{- range .global.imagePullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets .name -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end }}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets .name -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+ {{- $pullSecrets := list }}
+ {{- $context := .context }}
+
+ {{- if $context.Values.global }}
+ {{- range $context.Values.global.imagePullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper image version (ignores image revision/prerelease info & falls back to chart appVersion)
+{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }}
+*/}}
+{{- define "common.images.version" -}}
+{{- $imageTag := .imageRoot.tag | toString -}}
+{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}}
+{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}}
+ {{- $version := semver $imageTag -}}
+ {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}}
+{{- else -}}
+ {{- print .chart.AppVersion -}}
+{{- end -}}
+{{- end -}}
+
diff --git a/charts/openproject/charts/common/templates/_ingress.tpl b/charts/openproject/charts/common/templates/_ingress.tpl
new file mode 100644
index 0000000..efa5b85
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_ingress.tpl
@@ -0,0 +1,73 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+ - serviceName - String. Name of an existing service backend
+ - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+ name: {{ .serviceName }}
+ port:
+ {{- if typeIs "string" .servicePort }}
+ name: {{ .servicePort }}
+ {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+ number: {{ .servicePort | int }}
+ {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_labels.tpl b/charts/openproject/charts/common/templates/_labels.tpl
new file mode 100644
index 0000000..d90a6cd
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_labels.tpl
@@ -0,0 +1,46 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Kubernetes standard labels
+{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}}
+*/}}
+{{- define "common.labels.standard" -}}
+{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
+{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}}
+{{- with .context.Chart.AppVersion -}}
+{{- $_ := set $default "app.kubernetes.io/version" . -}}
+{{- end -}}
+{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .context) }}
+{{- else -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- with .Chart.AppVersion }}
+app.kubernetes.io/version: {{ . | quote }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector
+{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}}
+
+We don't want to loop over custom labels appending them to the selector
+since it's very likely that it will break deployments, services, etc.
+However, it's important to overwrite the standard labels if the user
+overwrote them on metadata.labels fields.
+*/}}
+{{- define "common.labels.matchLabels" -}}
+{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
+{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }}
+{{- else -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_names.tpl b/charts/openproject/charts/common/templates/_names.tpl
new file mode 100644
index 0000000..a222924
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_names.tpl
@@ -0,0 +1,71 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_resources.tpl b/charts/openproject/charts/common/templates/_resources.tpl
new file mode 100644
index 0000000..030fa1a
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_resources.tpl
@@ -0,0 +1,50 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a resource request/limit object based on a given preset.
+These presets are for basic testing and not meant to be used in production
+{{ include "common.resources.preset" (dict "type" "nano") -}}
+*/}}
+{{- define "common.resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
+{{- $presets := dict
+ "nano" (dict
+ "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi")
+ )
+ "micro" (dict
+ "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi")
+ )
+ "small" (dict
+ "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi")
+ )
+ "medium" (dict
+ "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi")
+ )
+ "large" (dict
+ "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi")
+ )
+ "xlarge" (dict
+ "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi")
+ )
+ "2xlarge" (dict
+ "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi")
+ )
+ }}
+{{- if hasKey $presets .type -}}
+{{- index $presets .type | toYaml -}}
+{{- else -}}
+{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_secrets.tpl b/charts/openproject/charts/common/templates/_secrets.tpl
new file mode 100644
index 0000000..84dbe38
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_secrets.tpl
@@ -0,0 +1,182 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+ {{- if not (typeIs "string" .existingSecret) -}}
+ {{- if .existingSecret.keyMapping -}}
+ {{- $key = index .existingSecret.keyMapping $.key -}}
+ {{- end -}}
+ {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - length - int - Optional - Length of the generated random password.
+ - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+ - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+ - context - Context - Required - Parent context.
+ - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets.
+ - skipB64enc - Boolean - Optional - Default to false. If set to true, the secret will not be base64 encoded.
+ - skipQuote - Boolean - Optional - Default to false. If set to true, no quotes will be added around the secret.
+The order in which this function returns a secret password:
+ 1. Already existing 'Secret' resource
+ (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+ 2. Password provided via the values.yaml
+ (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+ 3. Randomly generated secret password
+ (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+ {{- if hasKey $secretData .key }}
+ {{- $password = index $secretData .key | b64dec }}
+ {{- else if not (eq .failOnNew false) }}
+ {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+ {{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString }}
+ {{- end -}}
+{{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString }}
+{{- else }}
+
+ {{- if .context.Values.enabled }}
+ {{- $subchart = $chartName }}
+ {{- end -}}
+
+ {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+ {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+ {{- $passwordValidationErrors := list $requiredPasswordError -}}
+ {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+ {{- if .strong }}
+ {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+ {{- $password = randAscii $passwordLength }}
+ {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+ {{- $password = printf "%s%s" $subStr $password | toString | shuffle }}
+ {{- else }}
+ {{- $password = randAlphaNum $passwordLength }}
+ {{- end }}
+{{- end -}}
+{{- if not .skipB64enc }}
+{{- $password = $password | b64enc }}
+{{- end -}}
+{{- if .skipQuote -}}
+{{- printf "%s" $password -}}
+{{- else -}}
+{{- printf "%s" $password | quote -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+ {{- $value = index $secretData .key -}}
+{{- else if .defaultValue -}}
+ {{- $value = .defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- if $value -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_storage.tpl b/charts/openproject/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..16405a0
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_storage.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" .Values.global) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+ {{- if .global.storageClass -}}
+ {{- $storageClass = .global.storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+ {{- if (eq "-" $storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" $storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_tplvalues.tpl b/charts/openproject/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..a8ed763
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,38 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that may contain templates, optionally evaluating it within a given scope when one is provided.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }}
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }}
+{{- if contains "{{" (toJson .value) }}
+ {{- if .scope }}
+ {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }}
+ {{- else }}
+ {{- tpl $value .context }}
+ {{- end }}
+{{- else }}
+ {{- $value }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Merge a list of values that contains template after rendering them.
+Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge
+Usage:
+{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }}
+*/}}
+{{- define "common.tplvalues.merge" -}}
+{{- $dst := dict -}}
+{{- range .values -}}
+{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}}
+{{- end -}}
+{{ $dst | toYaml }}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_utils.tpl b/charts/openproject/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..bfbddf0
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_utils.tpl
@@ -0,0 +1,77 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+ {{- $fieldNameSplit := splitList "-" .field -}}
+ {{- $upperCaseFieldNameSplit := list -}}
+
+ {{- range $fieldNameSplit -}}
+ {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+ {{- end -}}
+
+ {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+ {{- if not $latestObj -}}
+ {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+ {{- end -}}
+ {{- $value = ( index $latestObj . ) -}}
+ {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns the first .Values key with a defined value, or the first key in the list if none are defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+ {{- if $value -}}
+ {{- $key = . }}
+ {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376).
+Usage:
+{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }}
+*/}}
+{{- define "common.utils.checksumTemplate" -}}
+{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}}
+{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/_warnings.tpl b/charts/openproject/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..0f763cd
--- /dev/null
+++ b/charts/openproject/charts/common/templates/_warnings.tpl
@@ -0,0 +1,82 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/tutorials/understand-rolling-tags-containers
+{{- end }}
+{{- end -}}
+
+{{/*
+Warning about not setting the resource object in all deployments.
+Usage:
+{{ include "common.warnings.resources" (dict "sections" (list "path1" "path2") "context" $) }}
+Example:
+{{- include "common.warnings.resources" (dict "sections" (list "csiProvider.provider" "server" "volumePermissions" "") "context" $) }}
+The list in the example assumes that the following values exist:
+ - csiProvider.provider.resources
+ - server.resources
+ - volumePermissions.resources
+ - resources
+*/}}
+{{- define "common.warnings.resources" -}}
+{{- $values := .context.Values -}}
+{{- $printMessage := false -}}
+{{ $affectedSections := list -}}
+{{- range .sections -}}
+ {{- if eq . "" -}}
+ {{/* Case where the resources section is at the root (one main deployment in the chart) */}}
+ {{- if not (index $values "resources") -}}
+ {{- $affectedSections = append $affectedSections "resources" -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- else -}}
+ {{/* Case where there are multiple resources sections (more than one main deployment in the chart) */}}
+ {{- $keys := split "." . -}}
+ {{/* We iterate through the different levels until arriving to the resource section. Example: a.b.c.resources */}}
+ {{- $section := $values -}}
+ {{- range $keys -}}
+ {{- $section = index $section . -}}
+ {{- end -}}
+ {{- if not (index $section "resources") -}}
+ {{/* If the section has enabled=false or replicaCount=0, do not include it */}}
+ {{- if and (hasKey $section "enabled") -}}
+ {{- if index $section "enabled" -}}
+ {{/* enabled=true */}}
+ {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- else if and (hasKey $section "replicaCount") -}}
+ {{/* We need a casting to int because number 0 is not treated as an int by default */}}
+ {{- if (gt (index $section "replicaCount" | int) 0) -}}
+ {{/* replicaCount > 0 */}}
+ {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- else -}}
+ {{/* Default case, add it to the affected sections */}}
+ {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+{{- if $printMessage }}
+
+WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
+{{- range $affectedSections }}
+ - {{ . }}
+{{- end }}
++info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/validations/_cassandra.tpl b/charts/openproject/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..eda9aad
--- /dev/null
+++ b/charts/openproject/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,77 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+ {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+ {{- $enabled := include "common.cassandra.values.enabled" . -}}
+ {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+ {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.dbUser.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.cassandra.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+ {{- if .subchart -}}
+ cassandra.dbUser
+ {{- else -}}
+ dbUser
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/validations/_mariadb.tpl b/charts/openproject/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 0000000..17d83a2
--- /dev/null
+++ b/charts/openproject/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,108 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+ {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mariadb.values.enabled" . -}}
+ {{- $architecture := include "common.mariadb.values.architecture" . -}}
+ {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mariadb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mariadb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/validations/_mongodb.tpl b/charts/openproject/charts/common/templates/validations/_mongodb.tpl
new file mode 100644
index 0000000..bbb445b
--- /dev/null
+++ b/charts/openproject/charts/common/templates/validations/_mongodb.tpl
@@ -0,0 +1,113 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret"
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+ {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mongodb.values.enabled" . -}}
+ {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+ {{- $architecture := include "common.mongodb.values.architecture" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+ {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+ {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+ {{- if and $valueUsername $valueDatabase -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replicaset") -}}
+ {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mongodb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mongodb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/validations/_mysql.tpl b/charts/openproject/charts/common/templates/validations/_mysql.tpl
new file mode 100644
index 0000000..ca3953f
--- /dev/null
+++ b/charts/openproject/charts/common/templates/validations/_mysql.tpl
@@ -0,0 +1,108 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+ {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mysql.values.enabled" . -}}
+ {{- $architecture := include "common.mysql.values.architecture" . -}}
+ {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mysql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+ {{- if .subchart -}}
+ mysql.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/validations/_postgresql.tpl b/charts/openproject/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..8c9aa57
--- /dev/null
+++ b/charts/openproject/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,134 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+ {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+ {{- $enabled := include "common.postgresql.values.enabled" . -}}
+ {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+ {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+ {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+ {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+ {{- if (eq $enabledReplication "true") -}}
+ {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+ - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+ {{- if .context.Values.global -}}
+ {{- if .context.Values.global.postgresql -}}
+ {{- index .context.Values.global.postgresql .key | quote -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+ {{- if .subchart -}}
+ {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+ {{- else -}}
+ {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+ {{- if not $globalValue -}}
+ {{- if .subchart -}}
+ postgresql.postgresqlPassword
+ {{- else -}}
+ postgresqlPassword
+ {{- end -}}
+ {{- else -}}
+ global.postgresql.postgresqlPassword
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+ {{- else -}}
+ {{- printf "%v" .context.Values.replication.enabled -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+ {{- if .subchart -}}
+ postgresql.replication.password
+ {{- else -}}
+ replication.password
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/validations/_redis.tpl b/charts/openproject/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..fc0d208
--- /dev/null
+++ b/charts/openproject/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,81 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+ {{- $enabled := include "common.redis.values.enabled" . -}}
+ {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+ {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+ {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+ {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+ {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+ {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+ {{- if eq $useAuth "true" -}}
+ {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.redis.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+ {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart's includes the standarizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+ {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+ {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+ {{- if $standarizedAuthValues -}}
+ {{- true -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/templates/validations/_validations.tpl b/charts/openproject/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..31ceda8
--- /dev/null
+++ b/charts/openproject/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,51 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+ {{- range .required -}}
+ {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+ - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+ {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+ {{- if not $value -}}
+ {{- $varname := "my-value" -}}
+ {{- $getCurrentValue := "" -}}
+ {{- if and .secret .field -}}
+ {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+ {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+ {{- end -}}
+ {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/common/values.yaml b/charts/openproject/charts/common/values.yaml
new file mode 100644
index 0000000..9abe0e1
--- /dev/null
+++ b/charts/openproject/charts/common/values.yaml
@@ -0,0 +1,8 @@
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/charts/openproject/charts/memcached/.helmignore b/charts/openproject/charts/memcached/.helmignore
new file mode 100644
index 0000000..fb56657
--- /dev/null
+++ b/charts/openproject/charts/memcached/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+# img folder
+img/
diff --git a/charts/openproject/charts/memcached/Chart.lock b/charts/openproject/charts/memcached/Chart.lock
new file mode 100644
index 0000000..80a5f12
--- /dev/null
+++ b/charts/openproject/charts/memcached/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ version: 2.18.0
+digest: sha256:f489ae7394a4eceb24fb702901483c67a5b4fff605f19d5e2545e3a6778e1280
+generated: "2024-03-05T14:45:44.308851503+01:00"
diff --git a/charts/openproject/charts/memcached/Chart.yaml b/charts/openproject/charts/memcached/Chart.yaml
new file mode 100644
index 0000000..6872261
--- /dev/null
+++ b/charts/openproject/charts/memcached/Chart.yaml
@@ -0,0 +1,33 @@
+annotations:
+ category: Infrastructure
+ images: |
+ - name: memcached
+ image: docker.io/bitnami/memcached:1.6.24-debian-12-r0
+ - name: memcached-exporter
+ image: docker.io/bitnami/memcached-exporter:0.14.2-debian-12-r10
+ - name: os-shell
+ image: docker.io/bitnami/os-shell:12-debian-12-r16
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 1.6.24
+dependencies:
+- name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ tags:
+ - bitnami-common
+ version: 2.x.x
+description: Memcached is an high-performance, distributed memory object caching system,
+ generic in nature, but intended for use in speeding up dynamic web applications
+ by alleviating database load.
+home: https://bitnami.com
+icon: https://bitnami.com/assets/stacks/memcached/img/memcached-stack-220x234.png
+keywords:
+- memcached
+- cache
+maintainers:
+- name: VMware, Inc.
+ url: https://github.com/bitnami/charts
+name: memcached
+sources:
+- https://github.com/bitnami/charts/tree/main/bitnami/memcached
+version: 6.14.0
diff --git a/charts/openproject/charts/memcached/README.md b/charts/openproject/charts/memcached/README.md
new file mode 100644
index 0000000..a8ed875
--- /dev/null
+++ b/charts/openproject/charts/memcached/README.md
@@ -0,0 +1,452 @@
+<!--- app-name: Memcached -->
+
+# Bitnami package for Memcached
+
+Memcached is an high-performance, distributed memory object caching system, generic in nature, but intended for use in speeding up dynamic web applications by alleviating database load.
+
+[Overview of Memcached](http://memcached.org)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/memcached
+```
+
+Looking to use Memcached in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Introduction
+
+This chart bootstraps a [Memcached](https://github.com/bitnami/containers/tree/main/bitnami/memcached) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.23+
+- Helm 3.8.0+
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/memcached
+```
+
+> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
+
+These commands deploy Memcached on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` |
+
+### Common parameters
+
+| Name | Description | Value |
+| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
+| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
+| `extraDeploy` | Extra objects to deploy (evaluated as a template) | `[]` |
+| `commonLabels` | Add labels to all the deployed resources | `{}` |
+| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the deployment/statefulset | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the deployment/statefulset | `["infinity"]` |
+
+### Memcached parameters
+
+| Name | Description | Value |
+| ----------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------- |
+| `image.registry` | Memcached image registry | `REGISTRY_NAME` |
+| `image.repository` | Memcached image repository | `REPOSITORY_NAME/memcached` |
+| `image.digest` | Memcached image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `image.pullPolicy` | Memcached image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `architecture` | Memcached architecture. Allowed values: standalone or high-availability | `standalone` |
+| `auth.enabled` | Enable Memcached authentication | `false` |
+| `auth.username` | Memcached admin user | `""` |
+| `auth.password` | Memcached admin password | `""` |
+| `auth.existingPasswordSecret` | Existing secret with Memcached credentials (must contain a value for `memcached-password` key) | `""` |
+| `command` | Override default container command (useful when using custom images) | `[]` |
+| `args` | Override default container args (useful when using custom images) | `[]` |
+| `extraEnvVars` | Array with extra environment variables to add to Memcached nodes | `[]` |
+| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Memcached nodes | `""` |
+| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Memcached nodes | `""` |
+
+### Deployment/Statefulset parameters
+
+| Name | Description | Value |
+| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
+| `replicaCount` | Number of Memcached nodes | `1` |
+| `containerPorts.memcached` | Memcached container port | `11211` |
+| `livenessProbe.enabled` | Enable livenessProbe on Memcached containers | `true` |
+| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
+| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `readinessProbe.enabled` | Enable readinessProbe on Memcached containers | `true` |
+| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
+| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` |
+| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `startupProbe.enabled` | Enable startupProbe on Memcached containers | `false` |
+| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `lifecycleHooks` | for the Memcached container(s) to automate configuration before or after startup | `{}` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `none` |
+| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
+| `podSecurityContext.enabled` | Enabled Memcached pods' Security Context | `true` |
+| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
+| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
+| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
+| `podSecurityContext.fsGroup` | Set Memcached pod's Security Context fsGroup | `1001` |
+| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
+| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
+| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
+| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
+| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
+| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
+| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
+| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
+| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
+| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
+| `automountServiceAccountToken` | Mount Service Account token in pod | `false` |
+| `hostAliases` | Add deployment host aliases | `[]` |
+| `podLabels` | Extra labels for Memcached pods | `{}` |
+| `podAnnotations` | Annotations for Memcached pods | `{}` |
+| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` |
+| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `affinity` | Affinity for pod assignment | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `tolerations` | Tolerations for pod assignment | `[]` |
+| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` |
+| `priorityClassName` | Name of the existing priority class to be used by Memcached pods, priority class needs to be created beforehand | `""` |
+| `schedulerName` | Kubernetes pod scheduler registry | `""` |
+| `terminationGracePeriodSeconds` | In seconds, time the given to the memcached pod needs to terminate gracefully | `""` |
+| `updateStrategy.type` | Memcached statefulset strategy type | `RollingUpdate` |
+| `updateStrategy.rollingUpdate` | Memcached statefulset rolling update configuration parameters | `{}` |
+| `extraVolumes` | Optionally specify extra list of additional volumes for the Memcached pod(s) | `[]` |
+| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Memcached container(s) | `[]` |
+| `sidecars` | Add additional sidecar containers to the Memcached pod(s) | `[]` |
+| `initContainers` | Add additional init containers to the Memcached pod(s) | `[]` |
+| `autoscaling.enabled` | Enable memcached statefulset autoscaling (requires architecture: "high-availability") | `false` |
+| `autoscaling.minReplicas` | memcached statefulset autoscaling minimum number of replicas | `3` |
+| `autoscaling.maxReplicas` | memcached statefulset autoscaling maximum number of replicas | `6` |
+| `autoscaling.targetCPU` | memcached statefulset autoscaling target CPU percentage | `50` |
+| `autoscaling.targetMemory` | memcached statefulset autoscaling target CPU memory | `50` |
+| `pdb.create` | Deploy a pdb object for the Memcached pod | `false` |
+| `pdb.minAvailable` | Minimum available Memcached replicas | `""` |
+| `pdb.maxUnavailable` | Maximum unavailable Memcached replicas | `1` |
+
+### Traffic Exposure parameters
+
+| Name | Description | Value |
+| --------------------------------------- | --------------------------------------------------------------------------------------- | ----------- |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.ports.memcached` | Memcached service port | `11211` |
+| `service.nodePorts.memcached` | Node port for Memcached | `""` |
+| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `""` |
+| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `service.clusterIP` | Memcached service Cluster IP | `""` |
+| `service.loadBalancerIP` | Memcached service Load Balancer IP | `""` |
+| `service.loadBalancerSourceRanges` | Memcached service Load Balancer sources | `[]` |
+| `service.externalTrafficPolicy` | Memcached service external traffic policy | `Cluster` |
+| `service.annotations` | Additional custom annotations for Memcached service | `{}` |
+| `service.extraPorts` | Extra ports to expose in the Memcached service (normally used with the `sidecar` value) | `[]` |
+| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` |
+| `networkPolicy.allowExternal` | The Policy model to apply | `true` |
+| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` |
+| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
+| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` |
+
+### Other Parameters
+
+| Name | Description | Value |
+| --------------------------------------------- | ---------------------------------------------------------------------- | ------- |
+| `serviceAccount.create` | Enable creation of ServiceAccount for Memcached pod | `true` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` |
+| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
+
+### Persistence parameters
+
+| Name | Description | Value |
+| -------------------------- | ------------------------------------------------------------------------ | ------------------- |
+| `persistence.enabled` | Enable Memcached data persistence using PVC. If false, use emptyDir | `false` |
+| `persistence.storageClass` | PVC Storage Class for Memcached data volume | `""` |
+| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` |
+| `persistence.size` | PVC Storage Request for Memcached data volume | `8Gi` |
+| `persistence.annotations` | Annotations for the PVC | `{}` |
+| `persistence.labels` | Labels for the PVC | `{}` |
+| `persistence.selector` | Selector to match an existing Persistent Volume for Memcached's data PVC | `{}` |
+
+### Volume Permissions parameters
+
+| Name | Description | Value |
+| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` |
+| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` |
+| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
+| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `none` |
+| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
+| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
+| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
+| `metrics.image.registry` | Memcached exporter image registry | `REGISTRY_NAME` |
+| `metrics.image.repository` | Memcached exporter image repository | `REPOSITORY_NAME/memcached-exporter` |
+| `metrics.image.digest` | Memcached exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `metrics.containerPorts.metrics` | Memcached Prometheus Exporter container port | `9150` |
+| `metrics.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). | `none` |
+| `metrics.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
+| `metrics.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
+| `metrics.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
+| `metrics.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
+| `metrics.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
+| `metrics.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
+| `metrics.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
+| `metrics.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
+| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
+| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
+| `metrics.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
+| `metrics.livenessProbe.enabled` | Enable livenessProbe on Memcached Prometheus exporter containers | `true` |
+| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` |
+| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
+| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `metrics.readinessProbe.enabled` | Enable readinessProbe on Memcached Prometheus exporter containers | `true` |
+| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` |
+| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
+| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `metrics.startupProbe.enabled` | Enable startupProbe on Memcached Prometheus exporter containers | `false` |
+| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
+| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `metrics.podAnnotations` | Memcached Prometheus exporter pod Annotation and Labels | `{}` |
+| `metrics.service.ports.metrics` | Prometheus metrics service port | `9150` |
+| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `metrics.service.annotations` | Annotations for the Prometheus metrics service | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
+
+The above parameters map to the environment variables defined in the [bitnami/memcached](https://github.com/bitnami/containers/tree/main/bitnami/memcached) container image. For more information please refer to the [bitnami/memcached](https://github.com/bitnami/containers/tree/main/bitnami/memcached) container image documentation.
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release --set auth.username=user,auth.password=password oci://REGISTRY_NAME/REPOSITORY_NAME/memcached
+```
+
+> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
+
+The above command sets the Memcached admin account username and password to `user` and `password` respectively.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/memcached
+```
+
+> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
+> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/memcached/values.yaml)
+
+## Configuration and installation details
+
+### Resource requests and limits
+
+Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case.
+
+To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcePreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Use Sidecars and Init Containers
+
+If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter.
+
+```yaml
+sidecars:
+- name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+```
+
+If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter (where available), as shown in the example below:
+
+```yaml
+service:
+ extraPorts:
+ - name: extraPort
+ port: 11311
+ targetPort: 11311
+```
+
+> NOTE: This Helm chart already includes sidecar containers for the Prometheus exporters (where applicable). These can be activated by adding the `--enable-metrics=true` parameter at deployment time. The `sidecars` parameter should therefore only be used for any extra sidecar containers.
+
+If additional init containers are needed in the same pod, they can be defined using the `initContainers` parameter. Here is an example:
+
+```yaml
+initContainers:
+ - name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+```
+
+Learn more about [sidecar containers](https://kubernetes.io/docs/concepts/workloads/pods/) and [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
+
+### Set Pod affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+## Persistence
+
+When using `architecture: "high-availability"` the [Bitnami Memcached](https://github.com/bitnami/containers/tree/main/bitnami/memcached) image stores the cache-state at the `/cache-state` path of the container if enabled.
+
+Persistent Volume Claims (PVCs) are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 6.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Bitnami charts repository.
+
+Some affected values are:
+
+- `memcachedUsername` and `memcachedPassword` have regrouped under the `auth` map.
+- `arguments` has been renamed to `args`.
+- `extraEnv` has been renamed to `extraEnvVars`.
+- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map.
+- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map.
+- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+
+### To 5.3.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 5.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+### To 4.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 4.0.0. The following example assumes that the release name is memcached:
+
+```console
+kubectl delete deployment memcached --cascade=false
+helm upgrade memcached oci://REGISTRY_NAME/REPOSITORY_NAME/memcached
+```
+
+> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
+
+### To 3.0.0
+
+This release uses the new bash based `bitnami/memcached` container which uses bash scripts for the start up logic of the container and is smaller in size.
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is memcached:
+
+```console
+kubectl patch deployment memcached --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+```
+
+## License
+
+Copyright © 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/charts/openproject/charts/memcached/charts/common/.helmignore b/charts/openproject/charts/memcached/charts/common/.helmignore
new file mode 100644
index 0000000..7c7c21d
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/.helmignore
@@ -0,0 +1,24 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+# img folder
+img/
diff --git a/charts/openproject/charts/memcached/charts/common/Chart.yaml b/charts/openproject/charts/memcached/charts/common/Chart.yaml
new file mode 100644
index 0000000..2acf0cd
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+ category: Infrastructure
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.18.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+ This chart is not deployable by itself.
+home: https://bitnami.com
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: VMware, Inc.
+ url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+type: library
+version: 2.18.0
diff --git a/charts/openproject/charts/memcached/charts/common/README.md b/charts/openproject/charts/memcached/charts/common/README.md
new file mode 100644
index 0000000..0d01a1e
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/README.md
@@ -0,0 +1,235 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+ - name: common
+ version: 2.x.x
+ repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.names.fullname" . }}
+data:
+ myvalue: "Hello World"
+```
+
+Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.23+
+- Helm 3.8.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+ type: string
+ description: Docker registry where the image is located
+ example: docker.io
+
+repository:
+ type: string
+ description: Repository and image name
+ example: bitnami/nginx
+
+tag:
+ type: string
+ description: image tag
+ example: 1.16.1-debian-10-r63
+
+pullPolicy:
+ type: string
+ description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+ type: array
+ items:
+ type: string
+ description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+ type: boolean
+ description: Set to true if you would like to see extra information on logs
+ example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+ type: boolean
+ description: Whether enable persistence.
+ example: true
+
+storageClass:
+ type: string
+ description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+ example: "-"
+
+accessMode:
+ type: string
+ description: Access mode for the Persistent Volume Storage.
+ example: ReadWriteOnce
+
+size:
+ type: string
+ description: Size the Persistent Volume Storage.
+ example: 8Gi
+
+path:
+ type: string
+ description: Path to be persisted.
+ example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+ type: string
+ description: Name of the existing secret.
+ example: mySecret
+keyMapping:
+ description: Mapping between the expected key name and the name of the key in the existing secret.
+ type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+# password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+ password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+ env:
+ - name: PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+ key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+ password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+ export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+ export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright © 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_affinities.tpl b/charts/openproject/charts/memcached/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..e85b1df
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_affinities.tpl
@@ -0,0 +1,139 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" . -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $customLabels := default (dict) .customLabels -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: 1
+ {{- range $extraPodAffinityTerms }}
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := .extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: {{ .weight | default 1 -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $customLabels := default (dict) .customLabels -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ {{- range $extraPodAffinityTerms }}
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := .extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_capabilities.tpl b/charts/openproject/charts/memcached/charts/common/templates/_capabilities.tpl
new file mode 100644
index 0000000..115674a
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_capabilities.tpl
@@ -0,0 +1,229 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+ {{- if .Values.global.kubeVersion }}
+ {{- .Values.global.kubeVersion -}}
+ {{- else }}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+ {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for daemonset.
+*/}}
+{{- define "common.capabilities.daemonset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Vertical Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.vpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if PodSecurityPolicy is supported
+*/}}
+{{- define "common.capabilities.psp.supported" -}}
+{{- if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if AdmissionConfiguration is supported
+*/}}
+{{- define "common.capabilities.admissionConfiguration.supported" -}}
+{{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for AdmissionConfiguration.
+*/}}
+{{- define "common.capabilities.admissionConfiguration.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiserver.config.k8s.io/v1alpha1" -}}
+{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiserver.config.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiserver.config.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for PodSecurityConfiguration.
+*/}}
+{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "pod-security.admission.config.k8s.io/v1alpha1" -}}
+{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "pod-security.admission.config.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "pod-security.admission.config.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_compatibility.tpl b/charts/openproject/charts/memcached/charts/common/templates/_compatibility.tpl
new file mode 100644
index 0000000..c529f08
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_compatibility.tpl
@@ -0,0 +1,35 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return true if the detected platform is Openshift
+Usage:
+{{- include "common.compatibility.isOpenshift" . -}}
+*/}}
+{{- define "common.compatibility.isOpenshift" -}}
+{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" -}}
+{{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC
+Usage:
+{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) -}}
+*/}}
+{{- define "common.compatibility.renderSecurityContext" -}}
+{{- $adaptedContext := .secContext -}}
+{{- if .context.Values.global.compatibility -}}
+ {{- if .context.Values.global.compatibility.openshift -}}
+ {{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}}
+ {{/* Remove incompatible user/group values that do not work in Openshift out of the box */}}
+ {{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+{{- omit $adaptedContext "enabled" | toYaml -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_errors.tpl b/charts/openproject/charts/memcached/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..07ded6f
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_errors.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+ - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+ - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+ {{- $validationErrors := join "" .validationErrors -}}
+ {{- if and $validationErrors .context.Release.IsUpgrade -}}
+ {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+ {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+ {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+ {{- $errorString = print $errorString "\n%s" -}}
+ {{- printf $errorString $validationErrors | fail -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_images.tpl b/charts/openproject/charts/memcached/charts/common/templates/_images.tpl
new file mode 100644
index 0000000..1bcb779
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_images.tpl
@@ -0,0 +1,117 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+ {{- if .global.imageRegistry }}
+ {{- $registryName = .global.imageRegistry -}}
+ {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+ {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+ {{- printf "%s%s%s" $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+ {{- $pullSecrets := list }}
+
+ {{- if .global }}
+ {{- range .global.imagePullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets .name -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end }}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets .name -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+ {{- $pullSecrets := list }}
+ {{- $context := .context }}
+
+ {{- if $context.Values.global }}
+ {{- range $context.Values.global.imagePullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- if kindIs "map" . -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}}
+ {{- else -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper image version (ignores image revision/prerelease info & falls back to chart appVersion)
+{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }}
+*/}}
+{{- define "common.images.version" -}}
+{{- $imageTag := .imageRoot.tag | toString -}}
+{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}}
+{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}}
+ {{- $version := semver $imageTag -}}
+ {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}}
+{{- else -}}
+ {{- print .chart.AppVersion -}}
+{{- end -}}
+{{- end -}}
+
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_ingress.tpl b/charts/openproject/charts/memcached/charts/common/templates/_ingress.tpl
new file mode 100644
index 0000000..efa5b85
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_ingress.tpl
@@ -0,0 +1,73 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+ - serviceName - String. Name of an existing service backend
+ - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+ name: {{ .serviceName }}
+ port:
+ {{- if typeIs "string" .servicePort }}
+ name: {{ .servicePort }}
+ {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+ number: {{ .servicePort | int }}
+ {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_labels.tpl b/charts/openproject/charts/memcached/charts/common/templates/_labels.tpl
new file mode 100644
index 0000000..d90a6cd
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_labels.tpl
@@ -0,0 +1,46 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Kubernetes standard labels
+{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}}
+*/}}
+{{- define "common.labels.standard" -}}
+{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
+{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}}
+{{- with .context.Chart.AppVersion -}}
+{{- $_ := set $default "app.kubernetes.io/version" . -}}
+{{- end -}}
+{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .context) }}
+{{- else -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- with .Chart.AppVersion }}
+app.kubernetes.io/version: {{ . | quote }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector
+{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}}
+
+We don't want to loop over custom labels appending them to the selector
+since it's very likely that it will break deployments, services, etc.
+However, it's important to overwrite the standard labels if the user
+overwrote them on metadata.labels fields.
+*/}}
+{{- define "common.labels.matchLabels" -}}
+{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
+{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }}
+{{- else -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_names.tpl b/charts/openproject/charts/memcached/charts/common/templates/_names.tpl
new file mode 100644
index 0000000..a222924
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_names.tpl
@@ -0,0 +1,71 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_resources.tpl b/charts/openproject/charts/memcached/charts/common/templates/_resources.tpl
new file mode 100644
index 0000000..d90f875
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_resources.tpl
@@ -0,0 +1,50 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a resource request/limit object based on a given preset.
+These presets are for basic testing and not meant to be used in production
+{{ include "common.resources.preset" (dict "type" "nano") -}}
+*/}}
+{{- define "common.resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}}
+{{- $presets := dict
+ "nano" (dict
+ "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi")
+ )
+ "micro" (dict
+ "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi")
+ )
+ "small" (dict
+ "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi")
+ )
+ "medium" (dict
+ "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi")
+ )
+ "large" (dict
+ "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi")
+ )
+ "xlarge" (dict
+ "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi")
+ )
+ "2xlarge" (dict
+ "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi")
+ "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi")
+ )
+ }}
+{{- if hasKey $presets .type -}}
+{{- index $presets .type | toYaml -}}
+{{- else -}}
+{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_secrets.tpl b/charts/openproject/charts/memcached/charts/common/templates/_secrets.tpl
new file mode 100644
index 0000000..84dbe38
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_secrets.tpl
@@ -0,0 +1,182 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+ {{- if not (typeIs "string" .existingSecret) -}}
+ {{- if .existingSecret.keyMapping -}}
+ {{- $key = index .existingSecret.keyMapping $.key -}}
+ {{- end -}}
+ {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - length - int - Optional - Length of the generated random password.
+ - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+ - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+ - context - Context - Required - Parent context.
+ - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets.
+ - skipB64enc - Boolean - Optional - Default to false. If set to true, the secret will not be base64 encoded.
+ - skipQuote - Boolean - Optional - Default to false. If set to true, no quotes will be added around the secret.
+The order in which this function returns a secret password:
+ 1. Already existing 'Secret' resource
+ (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+ 2. Password provided via the values.yaml
+ (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+ 3. Randomly generated secret password
+ (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+ {{- if hasKey $secretData .key }}
+ {{- $password = index $secretData .key | b64dec }}
+ {{- else if not (eq .failOnNew false) }}
+ {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+ {{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString }}
+ {{- end -}}
+{{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString }}
+{{- else }}
+
+ {{- if .context.Values.enabled }}
+ {{- $subchart = $chartName }}
+ {{- end -}}
+
+ {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+ {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+ {{- $passwordValidationErrors := list $requiredPasswordError -}}
+ {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+ {{- if .strong }}
+ {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+ {{- $password = randAscii $passwordLength }}
+ {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+ {{- $password = printf "%s%s" $subStr $password | toString | shuffle }}
+ {{- else }}
+ {{- $password = randAlphaNum $passwordLength }}
+ {{- end }}
+{{- end -}}
+{{- if not .skipB64enc }}
+{{- $password = $password | b64enc }}
+{{- end -}}
+{{- if .skipQuote -}}
+{{- printf "%s" $password -}}
+{{- else -}}
+{{- printf "%s" $password | quote -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+ {{- $value = index $secretData .key -}}
+{{- else if .defaultValue -}}
+ {{- $value = .defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- if $value -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_storage.tpl b/charts/openproject/charts/memcached/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..16405a0
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_storage.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+ {{- if .global.storageClass -}}
+ {{- $storageClass = .global.storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+ {{- if (eq "-" $storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" $storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_tplvalues.tpl b/charts/openproject/charts/memcached/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..a8ed763
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,38 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains a template, optionally using the given scope for relative references if one is provided.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }}
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }}
+{{- if contains "{{" (toJson .value) }}
+ {{- if .scope }}
+ {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }}
+ {{- else }}
+ {{- tpl $value .context }}
+ {{- end }}
+{{- else }}
+ {{- $value }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Merge a list of values that contains template after rendering them.
+Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge
+Usage:
+{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }}
+*/}}
+{{- define "common.tplvalues.merge" -}}
+{{- $dst := dict -}}
+{{- range .values -}}
+{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}}
+{{- end -}}
+{{ $dst | toYaml }}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_utils.tpl b/charts/openproject/charts/memcached/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..bfbddf0
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_utils.tpl
@@ -0,0 +1,77 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+ {{- $fieldNameSplit := splitList "-" .field -}}
+ {{- $upperCaseFieldNameSplit := list -}}
+
+ {{- range $fieldNameSplit -}}
+ {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+ {{- end -}}
+
+ {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+ {{- if not $latestObj -}}
+ {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+ {{- end -}}
+ {{- $value = ( index $latestObj . ) -}}
+ {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns the first key in the list whose .Values path has a defined value, or the first key of the list if none are defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+ {{- if $value -}}
+ {{- $key = . }}
+ {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376).
+Usage:
+{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }}
+*/}}
+{{- define "common.utils.checksumTemplate" -}}
+{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}}
+{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/_warnings.tpl b/charts/openproject/charts/memcached/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..0f763cd
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/_warnings.tpl
@@ -0,0 +1,82 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/tutorials/understand-rolling-tags-containers
+{{- end }}
+{{- end -}}
+
+{{/*
+Warning about not setting the resource object in all deployments.
+Usage:
+{{ include "common.warnings.resources" (dict "sections" (list "path1" "path2") "context" $) }}
+Example:
+{{- include "common.warnings.resources" (dict "sections" (list "csiProvider.provider" "server" "volumePermissions" "") "context" $) }}
+The list in the example assumes that the following values exist:
+ - csiProvider.provider.resources
+ - server.resources
+ - volumePermissions.resources
+ - resources
+*/}}
+{{- define "common.warnings.resources" -}}
+{{- $values := .context.Values -}}
+{{- $printMessage := false -}}
+{{ $affectedSections := list -}}
+{{- range .sections -}}
+ {{- if eq . "" -}}
+ {{/* Case where the resources section is at the root (one main deployment in the chart) */}}
+ {{- if not (index $values "resources") -}}
+ {{- $affectedSections = append $affectedSections "resources" -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- else -}}
+    {{/* Case where there are multiple resources sections (more than one main deployment in the chart) */}}
+ {{- $keys := split "." . -}}
+ {{/* We iterate through the different levels until arriving to the resource section. Example: a.b.c.resources */}}
+ {{- $section := $values -}}
+ {{- range $keys -}}
+ {{- $section = index $section . -}}
+ {{- end -}}
+ {{- if not (index $section "resources") -}}
+ {{/* If the section has enabled=false or replicaCount=0, do not include it */}}
+ {{- if and (hasKey $section "enabled") -}}
+ {{- if index $section "enabled" -}}
+ {{/* enabled=true */}}
+ {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- else if and (hasKey $section "replicaCount") -}}
+ {{/* We need a casting to int because number 0 is not treated as an int by default */}}
+ {{- if (gt (index $section "replicaCount" | int) 0) -}}
+ {{/* replicaCount > 0 */}}
+ {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- else -}}
+ {{/* Default case, add it to the affected sections */}}
+ {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
+ {{- $printMessage = true -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+{{- if $printMessage }}
+
+WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
+{{- range $affectedSections }}
+ - {{ . }}
+{{- end }}
++info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/validations/_cassandra.tpl b/charts/openproject/charts/memcached/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..eda9aad
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,77 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+ {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+ {{- $enabled := include "common.cassandra.values.enabled" . -}}
+ {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+ {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.dbUser.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.cassandra.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+ {{- if .subchart -}}
+ cassandra.dbUser
+ {{- else -}}
+ dbUser
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/validations/_mariadb.tpl b/charts/openproject/charts/memcached/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 0000000..17d83a2
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,108 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+ {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mariadb.values.enabled" . -}}
+ {{- $architecture := include "common.mariadb.values.architecture" . -}}
+ {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mariadb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mariadb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/validations/_mongodb.tpl b/charts/openproject/charts/memcached/charts/common/templates/validations/_mongodb.tpl
new file mode 100644
index 0000000..bbb445b
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/validations/_mongodb.tpl
@@ -0,0 +1,113 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret"
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+ {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mongodb.values.enabled" . -}}
+ {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+ {{- $architecture := include "common.mongodb.values.architecture" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+ {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+ {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+ {{- if and $valueUsername $valueDatabase -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replicaset") -}}
+ {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mongodb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mongodb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/validations/_mysql.tpl b/charts/openproject/charts/memcached/charts/common/templates/validations/_mysql.tpl
new file mode 100644
index 0000000..ca3953f
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/validations/_mysql.tpl
@@ -0,0 +1,108 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+ {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mysql.values.enabled" . -}}
+ {{- $architecture := include "common.mysql.values.architecture" . -}}
+ {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mysql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+ {{- if .subchart -}}
+ mysql.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/validations/_postgresql.tpl b/charts/openproject/charts/memcached/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..8c9aa57
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,134 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+ {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+ {{- $enabled := include "common.postgresql.values.enabled" . -}}
+ {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+ {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+ {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+ {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+ {{- if (eq $enabledReplication "true") -}}
+ {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+ - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+ {{- if .context.Values.global -}}
+ {{- if .context.Values.global.postgresql -}}
+ {{- index .context.Values.global.postgresql .key | quote -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+ {{- if .subchart -}}
+ {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+ {{- else -}}
+ {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+ {{- if not $globalValue -}}
+ {{- if .subchart -}}
+ postgresql.postgresqlPassword
+ {{- else -}}
+ postgresqlPassword
+ {{- end -}}
+ {{- else -}}
+ global.postgresql.postgresqlPassword
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+ {{- else -}}
+ {{- printf "%v" .context.Values.replication.enabled -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+ {{- if .subchart -}}
+ postgresql.replication.password
+ {{- else -}}
+ replication.password
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/validations/_redis.tpl b/charts/openproject/charts/memcached/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..fc0d208
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,81 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+ {{- $enabled := include "common.redis.values.enabled" . -}}
+ {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+ {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+ {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+ {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+ {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+ {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+ {{- if eq $useAuth "true" -}}
+ {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.redis.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+ {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+ {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+ {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+ {{- if $standarizedAuthValues -}}
+ {{- true -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/templates/validations/_validations.tpl b/charts/openproject/charts/memcached/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..31ceda8
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,51 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+ {{- range .required -}}
+ {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+ - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+ {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+ {{- if not $value -}}
+ {{- $varname := "my-value" -}}
+ {{- $getCurrentValue := "" -}}
+ {{- if and .secret .field -}}
+ {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+ {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+ {{- end -}}
+ {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/charts/common/values.yaml b/charts/openproject/charts/memcached/charts/common/values.yaml
new file mode 100644
index 0000000..9abe0e1
--- /dev/null
+++ b/charts/openproject/charts/memcached/charts/common/values.yaml
@@ -0,0 +1,8 @@
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/charts/openproject/charts/memcached/templates/NOTES.txt b/charts/openproject/charts/memcached/templates/NOTES.txt
new file mode 100644
index 0000000..626d0da
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/NOTES.txt
@@ -0,0 +1,44 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if and (not .Values.auth.enabled) (contains .Values.service.type "LoadBalancer") }}
+-------------------------------------------------------------------------------
+ WARNING
+
+ By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true"
+ you have most likely exposed the Memcached service externally without any
+ authentication mechanism.
+
+ For security reasons, we strongly suggest that you switch to "ClusterIP" or
+ "NodePort". As alternative, you can also specify valid credentials using the
+ "auth.username" and "auth.password" parameters.
+
+-------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+Memcached can be accessed via port {{ .Values.service.ports.memcached }} on the following DNS name from within your cluster:
+
+ {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+{{- if eq .Values.architecture "high-availability" }}
+
+Please see https://github.com/memcached/memcached/wiki/ConfiguringClient to understand the Memcached model and need for client-based consistent hashing.
+You might also want to consider more advanced routing/replication approaches with mcrouter: https://github.com/facebook/mcrouter/wiki/Replicated-pools-setup
+
+{{- end }}
+
+{{- if .Values.metrics.enabled }}
+
+To access the Memcached Prometheus metrics from outside the cluster execute the following commands:
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-metrics" (include "common.names.fullname" .) }} {{ .Values.metrics.service.ports.metrics }}:{{ .Values.metrics.service.ports.metrics }} &
+ curl http://127.0.0.1:{{ .Values.metrics.service.ports.metrics }}/metrics
+
+{{- end }}
+
+{{- include "memcached.validateValues" . }}
+{{- include "memcached.checkRollingTags" . }}
+{{- include "common.warnings.resources" (dict "sections" (list "metrics" "" "volumePermissions") "context" $) }}
diff --git a/charts/openproject/charts/memcached/templates/_helpers.tpl b/charts/openproject/charts/memcached/templates/_helpers.tpl
new file mode 100644
index 0000000..1614397
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/_helpers.tpl
@@ -0,0 +1,121 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the proper Memcached image name
+*/}}
+{{- define "memcached.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the metrics image)
+*/}}
+{{- define "memcached.metrics.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
+{{- end -}}
+
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "memcached.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "memcached.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "memcached.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Check if there are rolling tags in the images
+*/}}
+{{- define "memcached.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.metrics.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "memcached.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "memcached.validateValues.architecture" .) -}}
+{{- $messages := append $messages (include "memcached.validateValues.replicaCount" .) -}}
+{{- $messages := append $messages (include "memcached.validateValues.auth" .) -}}
+{{- $messages := append $messages (include "memcached.validateValues.readOnlyRootFilesystem" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Memcached - must provide a valid architecture */}}
+{{- define "memcached.validateValues.architecture" -}}
+{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "high-availability") -}}
+memcached: architecture
+ Invalid architecture selected. Valid values are "standalone" and
+ "high-availability". Please set a valid architecture (--set architecture="xxxx")
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Memcached - number of replicas */}}
+{{- define "memcached.validateValues.replicaCount" -}}
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and (eq .Values.architecture "standalone") (gt $replicaCount 1) -}}
+memcached: replicaCount
+ The standalone architecture doesn't allow to run more than 1 replica.
+ Please set a valid number of replicas (--set memcached.replicaCount=1) or
+ use the "high-availability" architecture (--set architecture="high-availability")
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Memcached - authentication */}}
+{{- define "memcached.validateValues.auth" -}}
+{{- if and .Values.auth.enabled (empty .Values.auth.username) -}}
+memcached: auth.username
+ Enabling authentication requires setting a valid admin username.
+ Please set a valid username (--set auth.username="xxxx")
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Memcached - containerSecurityContext.readOnlyRootFilesystem */}}
+{{- define "memcached.validateValues.readOnlyRootFilesystem" -}}
+{{- if and .Values.containerSecurityContext.enabled .Values.containerSecurityContext.readOnlyRootFilesystem .Values.auth.enabled -}}
+memcached: containerSecurityContext.readOnlyRootFilesystem
+ Enabling authentication is not compatible with using a read-only filesystem.
+ Please disable it (--set containerSecurityContext.readOnlyRootFilesystem=false)
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "memcached.secretPasswordName" -}}
+ {{- if .Values.auth.existingPasswordSecret -}}
+ {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}}
+ {{- else -}}
+ {{- printf "%s" (include "common.names.fullname" .) -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/memcached/templates/deployment.yaml b/charts/openproject/charts/memcached/templates/deployment.yaml
new file mode 100644
index 0000000..33ea05b
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/deployment.yaml
@@ -0,0 +1,221 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if eq .Values.architecture "standalone" }}
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
+ replicas: {{ .Values.replicaCount }}
+ {{- if .Values.updateStrategy }}
+ strategy: {{- toYaml .Values.updateStrategy | nindent 4 }}
+ {{- end }}
+ template:
+ metadata:
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }}
+ annotations:
+ {{- if .Values.auth.enabled }}
+ checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.podAnnotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- include "memcached.imagePullSecrets" . | nindent 6 }}
+ automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
+ {{- if .Values.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.affinity }}
+ affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+ {{- end }}
+ {{- if .Values.schedulerName }}
+ schedulerName: {{ .Values.schedulerName }}
+ {{- end }}
+ {{- if .Values.podSecurityContext.enabled }}
+ securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ {{- end }}
+ serviceAccountName: {{ template "memcached.serviceAccountName" . }}
+ {{- if .Values.initContainers }}
+ initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: memcached
+ image: {{ template "memcached.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.containerSecurityContext.enabled }}
+ securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" .Values.image.debug | quote }}
+ - name: MEMCACHED_PORT_NUMBER
+ value: {{ .Values.containerPorts.memcached | quote }}
+ {{- if .Values.auth.enabled }}
+ - name: MEMCACHED_USERNAME
+ value: {{ .Values.auth.username | quote }}
+ - name: MEMCACHED_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "memcached.secretPasswordName" . }}
+ key: memcached-password
+ {{- end }}
+ {{- if .Values.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }}
+ {{- end }}
+ {{- if .Values.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: memcache
+ containerPort: {{ .Values.containerPorts.memcached }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: memcache
+ {{- end }}
+ {{- if .Values.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: memcache
+ {{- end }}
+ {{- if .Values.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: memcache
+ {{- end }}
+ {{- end }}
+ {{- if .Values.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.resources }}
+ resources: {{- toYaml .Values.resources | nindent 12 }}
+ {{- else if ne .Values.resourcesPreset "none" }}
+ resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: empty-dir
+ mountPath: /opt/bitnami/memcached/conf
+ subPath: app-conf-dir
+ - name: empty-dir
+ mountPath: /tmp
+ subPath: tmp-dir
+ {{- if .Values.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ template "memcached.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.containerSecurityContext "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: {{ .Values.metrics.containerPorts.metrics }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /metrics
+ port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /metrics
+ port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ port: memcache
+ {{- end }}
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- else if ne .Values.metrics.resourcesPreset "none" }}
+ resources: {{- include "common.resources.preset" (dict "type" .Values.metrics.resourcesPreset) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: empty-dir
+ mountPath: /tmp
+ subPath: tmp-dir
+ {{- if .Values.metrics.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sidecars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: empty-dir
+ emptyDir: {}
+ {{- if .Values.extraVolumes }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/extra-list.yaml b/charts/openproject/charts/memcached/templates/extra-list.yaml
new file mode 100644
index 0000000..2d35a58
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/extra-list.yaml
@@ -0,0 +1,9 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/hpa.yaml b/charts/openproject/charts/memcached/templates/hpa.yaml
new file mode 100644
index 0000000..16f1e3b
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/hpa.yaml
@@ -0,0 +1,48 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.autoscaling.enabled (eq .Values.architecture "high-availability") }}
+apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ scaleTargetRef:
+ apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+ kind: StatefulSet
+ name: {{ template "common.names.fullname" . }}
+ minReplicas: {{ .Values.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.autoscaling.targetMemory }}
+ - type: Resource
+ resource:
+ name: memory
+ {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
+ targetAverageUtilization: {{ .Values.autoscaling.targetMemory }}
+ {{- else }}
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetMemory }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.autoscaling.targetCPU }}
+ - type: Resource
+ resource:
+ name: cpu
+ {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
+ targetAverageUtilization: {{ .Values.autoscaling.targetCPU }}
+ {{- else }}
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetCPU }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/metrics-svc.yaml b/charts/openproject/charts/memcached/templates/metrics-svc.yaml
new file mode 100644
index 0000000..f0db234
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/metrics-svc.yaml
@@ -0,0 +1,30 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+ {{- if .Values.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.metrics.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: metrics
+ port: {{ .Values.metrics.service.ports.metrics }}
+ targetPort: metrics
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
+ selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/networkpolicy.yaml b/charts/openproject/charts/memcached/templates/networkpolicy.yaml
new file mode 100644
index 0000000..879c63f
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/networkpolicy.yaml
@@ -0,0 +1,74 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }}
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+ {{- if .Values.networkPolicy.allowExternalEgress }}
+ egress:
+ - {}
+ {{- else }}
+ egress:
+ # Allow dns resolution
+ - ports:
+ - port: 53
+ protocol: UDP
+ - port: 53
+ protocol: TCP
+ # Allow connection to other cluster pods
+ - ports:
+ - port: {{ .Values.containerPorts.memcached }}
+ to:
+ - podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
+ {{- if .Values.networkPolicy.extraEgress }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+ ingress:
+ - ports:
+ - port: {{ .Values.containerPorts.memcached }}
+ {{- if .Values.metrics.enabled }}
+ - port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if not .Values.networkPolicy.allowExternal }}
+ from:
+ - podSelector:
+ matchLabels:
+ {{ template "common.names.fullname" . }}-client: "true"
+ - podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
+ {{- if .Values.networkPolicy.ingressNSMatchLabels }}
+ - namespaceSelector:
+ matchLabels:
+ {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressNSPodMatchLabels }}
+ podSelector:
+ matchLabels:
+ {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.networkPolicy.extraIngress }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/pdb.yaml b/charts/openproject/charts/memcached/templates/pdb.yaml
new file mode 100644
index 0000000..f2d7bed
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/pdb.yaml
@@ -0,0 +1,26 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.pdb.create (eq .Values.architecture "high-availability") }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.pdb.minAvailable }}
+ minAvailable: {{ .Values.pdb.minAvailable }}
+ {{- end }}
+ {{- if .Values.pdb.maxUnavailable }}
+ maxUnavailable: {{ .Values.pdb.maxUnavailable }}
+ {{- end }}
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/secrets.yaml b/charts/openproject/charts/memcached/templates/secrets.yaml
new file mode 100644
index 0000000..d989dab
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/secrets.yaml
@@ -0,0 +1,19 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and (.Values.auth.enabled) (not .Values.auth.existingPasswordSecret) }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: Opaque
+data:
+ memcached-password: {{ default (randAlphaNum 10) .Values.auth.password | b64enc | quote }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/service.yaml b/charts/openproject/charts/memcached/templates/service.yaml
new file mode 100644
index 0000000..d131219
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/service.yaml
@@ -0,0 +1,49 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if or .Values.service.annotations .Values.commonAnnotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ {{- if .Values.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{- end }}
+ {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }}
+ {{- end }}
+ {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+ - name: memcache
+ port: {{ .Values.service.ports.memcached }}
+ targetPort: memcache
+ {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.memcached)) }}
+ nodePort: {{ .Values.service.nodePorts.memcached }}
+ {{- else if eq .Values.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
+ selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
diff --git a/charts/openproject/charts/memcached/templates/serviceaccount.yaml b/charts/openproject/charts/memcached/templates/serviceaccount.yaml
new file mode 100644
index 0000000..c865d8e
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/serviceaccount.yaml
@@ -0,0 +1,18 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "memcached.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/servicemonitor.yaml b/charts/openproject/charts/memcached/templates/servicemonitor.yaml
new file mode 100644
index 0000000..cb0d197
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+ {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.metrics.serviceMonitor.jobLabel }}
+ jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
+ {{- if .Values.metrics.serviceMonitor.selector }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+ {{- end }}
+ app.kubernetes.io/component: metrics
+ endpoints:
+ - port: metrics
+ path: /metrics
+ {{- if .Values.metrics.serviceMonitor.interval }}
+ interval: {{ .Values.metrics.serviceMonitor.interval }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+ scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.honorLabels }}
+ honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.relabelings }}
+ relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }}
+ {{- end }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/templates/statefulset.yaml b/charts/openproject/charts/memcached/templates/statefulset.yaml
new file mode 100644
index 0000000..d5f3d54
--- /dev/null
+++ b/charts/openproject/charts/memcached/templates/statefulset.yaml
@@ -0,0 +1,291 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if eq .Values.architecture "high-availability" }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
+ {{- if not (and .Values.autoscaling.enabled (eq .Values.architecture "high-availability")) }}
+ replicas: {{ .Values.replicaCount }}
+ {{- end }}
+ {{- if .Values.podManagementPolicy }}
+ podManagementPolicy: {{ .Values.podManagementPolicy | quote }}
+ {{- end }}
+ serviceName: {{ template "common.names.fullname" . }}
+ {{- if .Values.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }}
+ {{- end }}
+ template:
+ metadata:
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }}
+ annotations:
+ {{- if .Values.auth.enabled }}
+ checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.podAnnotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- include "memcached.imagePullSecrets" . | nindent 6 }}
+ automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
+ {{- if .Values.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.affinity }}
+ affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+ {{- end }}
+ {{- if .Values.schedulerName }}
+ schedulerName: {{ .Values.schedulerName }}
+ {{- end }}
+ {{- if .Values.podSecurityContext.enabled }}
+ securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ {{- end }}
+ serviceAccountName: {{ template "memcached.serviceAccountName" . }}
+ {{- if or .Values.persistence.enabled .Values.initContainers }}
+ initContainers:
+ {{- if and .Values.persistence.enabled .Values.volumePermissions.enabled }}
+ - name: volume-permissions
+ image: {{ include "memcached.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p /cache-state
+ touch /cache-state/memory_file
+ find /cache-state -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}
+ securityContext:
+ runAsUser: {{ .Values.volumePermissions.containerSecurityContext.runAsUser }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- else if ne .Values.volumePermissions.resourcesPreset "none" }}
+ resources: {{- include "common.resources.preset" (dict "type" .Values.volumePermissions.resourcesPreset) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: data
+ mountPath: /cache-state
+ - name: empty-dir
+ mountPath: /tmp
+ subPath: tmp-dir
+ {{- end }}
+ {{- if .Values.initContainers }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: memcached
+ image: {{ template "memcached.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.containerSecurityContext.enabled }}
+ securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
+ {{- else if .Values.persistence.enabled }}
+ args:
+ - /run.sh
+ - --memory-file=/cache-state/memory_file
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" .Values.image.debug | quote }}
+ - name: MEMCACHED_PORT_NUMBER
+ value: {{ .Values.containerPorts.memcached | quote }}
+ {{- if .Values.auth.enabled }}
+ - name: MEMCACHED_USERNAME
+ value: {{ .Values.auth.username | quote }}
+ - name: MEMCACHED_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "memcached.secretPasswordName" . }}
+ key: memcached-password
+ {{- end }}
+ {{- if .Values.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }}
+ {{- end }}
+ {{- if .Values.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: memcache
+ containerPort: {{ .Values.containerPorts.memcached }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: memcache
+ {{- end }}
+ {{- if .Values.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: memcache
+ {{- end }}
+ {{- if .Values.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: memcache
+ {{- end }}
+ {{- end }}
+ {{- if .Values.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }}
+ {{- else if .Values.persistence.enabled }}
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ /usr/bin/pkill -10 memcached
+ sleep 60s
+ {{- end }}
+ {{- if .Values.resources }}
+ resources: {{- toYaml .Values.resources | nindent 12 }}
+ {{- else if ne .Values.resourcesPreset "none" }}
+ resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.persistence.enabled }}
+ - name: data
+ mountPath: /cache-state
+ {{- end }}
+ - name: empty-dir
+ mountPath: /opt/bitnami/memcached/conf
+ subPath: app-conf-dir
+ - name: empty-dir
+ mountPath: /tmp
+ subPath: tmp-dir
+ {{- if .Values.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ template "memcached.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.containerSecurityContext "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: {{ .Values.metrics.containerPorts.metrics }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /metrics
+ port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /metrics
+ port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: metrics
+ {{- end }}
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- else if ne .Values.metrics.resourcesPreset "none" }}
+ resources: {{- include "common.resources.preset" (dict "type" .Values.metrics.resourcesPreset) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: empty-dir
+ emptyDir: {}
+ {{- if .Values.extraVolumes }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.persistence.labels .Values.commonLabels ) "context" . ) }}
+ labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }}
+ {{- if or .Values.persistence.annotations .Values.commonAnnotations }}
+ {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.persistence.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/memcached/values.yaml b/charts/openproject/charts/memcached/values.yaml
new file mode 100644
index 0000000..c61da0e
--- /dev/null
+++ b/charts/openproject/charts/memcached/values.yaml
@@ -0,0 +1,831 @@
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+## @section Global parameters
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+ imageRegistry: ""
+ ## E.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+ storageClass: ""
+ ## Compatibility adaptations for Kubernetes platforms
+ ##
+ compatibility:
+ ## Compatibility adaptations for Openshift
+ ##
+ openshift:
+ ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
+ ##
+ adaptSecurityContext: disabled
+## @section Common parameters
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Extra objects to deploy (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## Enable diagnostic mode in the deployment/statefulset
+##
+diagnosticMode:
+ ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+ ##
+ enabled: false
+ ## @param diagnosticMode.command Command to override all containers in the deployment/statefulset
+ ##
+ command:
+ - sleep
+ ## @param diagnosticMode.args Args to override all containers in the deployment/statefulset
+ ##
+ args:
+ - infinity
+## @section Memcached parameters
+
+## Bitnami Memcached image version
+## ref: https://hub.docker.com/r/bitnami/memcached/tags/
+## @param image.registry [default: REGISTRY_NAME] Memcached image registry
+## @param image.repository [default: REPOSITORY_NAME/memcached] Memcached image repository
+## @skip image.tag Memcached image tag (immutable tags are recommended)
+## @param image.digest Memcached image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy Memcached image pull policy
+## @param image.pullSecrets Specify docker-registry secret names as an array
+## @param image.debug Specify if debug values should be set
+##
+image:
+ registry: docker.io
+ repository: bitnami/memcached
+ tag: 1.6.24-debian-12-r0
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Set to true if you would like to see extra information on logs
+ ##
+ debug: false
+## @param architecture Memcached architecture. Allowed values: standalone or high-availability
+##
+architecture: standalone
+## Authentication parameters
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/memcached#creating-the-memcached-admin-user
+##
+auth:
+ ## @param auth.enabled Enable Memcached authentication
+ ##
+ enabled: false
+ ## @param auth.username Memcached admin user
+ ##
+ username: ""
+ ## @param auth.password Memcached admin password
+ ##
+ password: ""
+ ## @param auth.existingPasswordSecret Existing secret with Memcached credentials (must contain a value for `memcached-password` key)
+ ##
+ existingPasswordSecret: ""
+## @param command Override default container command (useful when using custom images)
+##
+command: []
+## @param args Override default container args (useful when using custom images)
+## e.g:
+## args:
+## - /run.sh
+## - -m <maxMemoryLimit>
+## - -I <maxItemSize>
+## - -vv
+##
+args: []
+## @param extraEnvVars Array with extra environment variables to add to Memcached nodes
+## e.g:
+## extraEnvVars:
+## - name: FOO
+## value: "bar"
+##
+extraEnvVars: []
+## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Memcached nodes
+##
+extraEnvVarsCM: ""
+## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for Memcached nodes
+##
+extraEnvVarsSecret: ""
+## @section Deployment/Statefulset parameters
+
+## @param replicaCount Number of Memcached nodes
+##
+replicaCount: 1
+## @param containerPorts.memcached Memcached container port
+##
+containerPorts:
+ memcached: 11211
+## Configure extra options for Memcached containers' liveness, readiness and startup probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+## @param livenessProbe.enabled Enable livenessProbe on Memcached containers
+## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+## @param livenessProbe.periodSeconds Period seconds for livenessProbe
+## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
+## @param livenessProbe.successThreshold Success threshold for livenessProbe
+##
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+## @param readinessProbe.enabled Enable readinessProbe on Memcached containers
+## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+## @param readinessProbe.periodSeconds Period seconds for readinessProbe
+## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
+## @param readinessProbe.successThreshold Success threshold for readinessProbe
+##
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 6
+ successThreshold: 1
+## @param startupProbe.enabled Enable startupProbe on Memcached containers
+## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+## @param startupProbe.periodSeconds Period seconds for startupProbe
+## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
+## @param startupProbe.failureThreshold Failure threshold for startupProbe
+## @param startupProbe.successThreshold Success threshold for startupProbe
+##
+startupProbe:
+ enabled: false
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+## @param customLivenessProbe Custom livenessProbe that overrides the default one
+##
+customLivenessProbe: {}
+## @param customReadinessProbe Custom readinessProbe that overrides the default one
+##
+customReadinessProbe: {}
+## @param customStartupProbe Custom startupProbe that overrides the default one
+##
+customStartupProbe: {}
+## @param lifecycleHooks for the Memcached container(s) to automate configuration before or after startup
+##
+lifecycleHooks: {}
+## Memcached resource requests and limits
+## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
+##
+resourcesPreset: "none"
+## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
+## Example:
+## resources:
+## requests:
+## cpu: 2
+## memory: 512Mi
+## limits:
+## cpu: 3
+## memory: 1024Mi
+##
+resources: {}
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enabled Memcached pods' Security Context
+## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
+## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
+## @param podSecurityContext.supplementalGroups Set filesystem extra groups
+## @param podSecurityContext.fsGroup Set Memcached pod's Security Context fsGroup
+##
+podSecurityContext:
+ enabled: true
+ fsGroupChangePolicy: Always
+ sysctls: []
+ supplementalGroups: []
+ fsGroup: 1001
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enabled containers' Security Context
+## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
+## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
+## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
+## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
+## @param containerSecurityContext.privileged Set container's Security Context privileged
+## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
+## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
+## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
+## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
+##
+containerSecurityContext:
+ enabled: true
+ seLinuxOptions: null
+ runAsUser: 1001
+ runAsGroup: 1001
+ runAsNonRoot: true
+ privileged: false
+ readOnlyRootFilesystem: false
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop: ["ALL"]
+ seccompProfile:
+ type: "RuntimeDefault"
+## @param automountServiceAccountToken Mount Service Account token in pod
+##
+automountServiceAccountToken: false
+## @param hostAliases Add deployment host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param podLabels Extra labels for Memcached pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Annotations for Memcached pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+ ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+## @param podManagementPolicy The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel`
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+## @param priorityClassName Name of the existing priority class to be used by Memcached pods, priority class needs to be created beforehand
+## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+priorityClassName: ""
+## @param schedulerName Kubernetes pod scheduler registry
+## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param terminationGracePeriodSeconds In seconds, time given to the memcached pod to terminate gracefully
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+##
+terminationGracePeriodSeconds: ""
+## @param updateStrategy.type Memcached statefulset strategy type
+## @param updateStrategy.rollingUpdate Memcached statefulset rolling update configuration parameters
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+ type: RollingUpdate
+ rollingUpdate: {}
+## @param extraVolumes Optionally specify extra list of additional volumes for the Memcached pod(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumes:
+## - name: zookeeper-keystore
+## secret:
+## defaultMode: 288
+## secretName: zookeeper-keystore
+## - name: zookeeper-truststore
+## secret:
+## defaultMode: 288
+## secretName: zookeeper-truststore
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Memcached container(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumeMounts:
+## - name: zookeeper-keystore
+## mountPath: /certs/keystore
+## readOnly: true
+## - name: zookeeper-truststore
+## mountPath: /certs/truststore
+## readOnly: true
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the Memcached pod(s)
+## e.g:
+## sidecars:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the Memcached pod(s)
+## Example:
+## initContainers:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+initContainers: []
+## Memcached Autoscaling
+## @param autoscaling.enabled Enable memcached statefulset autoscaling (requires architecture: "high-availability")
+## @param autoscaling.minReplicas memcached statefulset autoscaling minimum number of replicas
+## @param autoscaling.maxReplicas memcached statefulset autoscaling maximum number of replicas
+## @param autoscaling.targetCPU memcached statefulset autoscaling target CPU percentage
+## @param autoscaling.targetMemory memcached statefulset autoscaling target memory percentage
+##
+autoscaling:
+ enabled: false
+ minReplicas: 3
+ maxReplicas: 6
+ targetCPU: 50
+ targetMemory: 50
+## Memcached Pod Disruption Budget
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+## @param pdb.create Deploy a pdb object for the Memcached pod
+## @param pdb.minAvailable Minimum available Memcached replicas
+## @param pdb.maxUnavailable Maximum unavailable Memcached replicas
+##
+pdb:
+ create: false
+ minAvailable: ""
+ maxUnavailable: 1
+## @section Traffic Exposure parameters
+service:
+ ## @param service.type Kubernetes Service type
+ ##
+ type: ClusterIP
+ ## @param service.ports.memcached Memcached service port
+ ##
+ ports:
+ memcached: 11211
+ ## Node ports to expose
+ ## NOTE: choose port between <30000-32767>
+ ## @param service.nodePorts.memcached Node port for Memcached
+ ##
+ nodePorts:
+ memcached: ""
+ ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/
+ ##
+ sessionAffinity: ""
+ ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## @param service.clusterIP Memcached service Cluster IP
+ ## e.g.:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param service.loadBalancerIP Memcached service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+ ##
+ loadBalancerIP: ""
+ ## @param service.loadBalancerSourceRanges Memcached service Load Balancer sources
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param service.externalTrafficPolicy Memcached service external traffic policy
+ ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param service.annotations Additional custom annotations for Memcached service
+ ##
+ annotations: {}
+ ## @param service.extraPorts Extra ports to expose in the Memcached service (normally used with the `sidecar` value)
+ ##
+ extraPorts: []
+## Network Policy configuration
+## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+ ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources
+ ##
+ enabled: true
+ ## @param networkPolicy.allowExternal The Policy model to apply
+ ## When set to false, only pods with the correct client label will have network access to the ports Memcached is
+ ## listening on. When true, Memcached will accept connections from any source (with the correct destination port).
+ ##
+ allowExternal: true
+ ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
+ ##
+ allowExternalEgress: true
+ ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
+ ## e.g:
+ ## extraIngress:
+ ## - ports:
+ ## - port: 1234
+ ## from:
+ ## - podSelector:
+ ## - matchLabels:
+ ## - role: frontend
+ ## - podSelector:
+ ## - matchExpressions:
+ ## - key: role
+ ## operator: In
+ ## values:
+ ## - frontend
+ ##
+ extraIngress: []
+ ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
+ ## e.g:
+ ## extraEgress:
+ ## - ports:
+ ## - port: 1234
+ ## to:
+ ## - podSelector:
+ ## - matchLabels:
+ ## - role: frontend
+ ## - podSelector:
+ ## - matchExpressions:
+ ## - key: role
+ ## operator: In
+ ## values:
+ ## - frontend
+ ##
+ extraEgress: []
+ ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
+ ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
+ ##
+ ingressNSMatchLabels: {}
+ ingressNSPodMatchLabels: {}
+## @section Other Parameters
+
+## Service account for Memcached to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+ ## @param serviceAccount.create Enable creation of ServiceAccount for Memcached pod
+ ##
+ create: true
+ ## @param serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+ ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+ ##
+ automountServiceAccountToken: false
+ ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+ ##
+ annotations: {}
+## @section Persistence parameters
+
+## Enable persistence using Persistent Volume Claims
+## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
+##
+persistence:
+ ## @param persistence.enabled Enable Memcached data persistence using PVC. If false, use emptyDir
+ ##
+ enabled: false
+ ## @param persistence.storageClass PVC Storage Class for Memcached data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param persistence.accessModes PVC Access modes
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param persistence.size PVC Storage Request for Memcached data volume
+ ##
+ size: 8Gi
+ ## @param persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param persistence.labels Labels for the PVC
+ ##
+ labels: {}
+ ## @param persistence.selector Selector to match an existing Persistent Volume for Memcached's data PVC
+ ## If set, the PVC can't have a PV dynamically provisioned for it
+ ## E.g.
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+ ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+ ##
+ enabled: false
+ ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
+ ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository
+ ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+ ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+ ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/os-shell
+ tag: 12-debian-12-r16
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Init container resource requests and limits
+ ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+ ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
+ ##
+ resourcesPreset: "none"
+ ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
+ ## Example:
+ ## resources:
+ ## requests:
+ ## cpu: 2
+ ## memory: 512Mi
+ ## limits:
+ ## cpu: 3
+ ## memory: 1024Mi
+ ##
+ resources: {}
+ ## Init container' Security Context
+ ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+ ## and not the below volumePermissions.containerSecurityContext.runAsUser
+ ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
+ ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+ ##
+ containerSecurityContext:
+ seLinuxOptions: null
+ runAsUser: 0
+## Prometheus Exporter / Metrics
+##
+metrics:
+ ## @param metrics.enabled Start a side-car prometheus exporter
+ ##
+ enabled: false
+ ## Bitnami Memcached Prometheus Exporter image
+ ## ref: https://hub.docker.com/r/bitnami/memcached-exporter/tags/
+ ## @param metrics.image.registry [default: REGISTRY_NAME] Memcached exporter image registry
+ ## @param metrics.image.repository [default: REPOSITORY_NAME/memcached-exporter] Memcached exporter image repository
+ ## @skip metrics.image.tag Memcached exporter image tag (immutable tags are recommended)
+ ## @param metrics.image.digest Memcached exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param metrics.image.pullPolicy Image pull policy
+ ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/memcached-exporter
+ tag: 0.14.2-debian-12-r10
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## @param metrics.containerPorts.metrics Memcached Prometheus Exporter container port
+ ##
+ containerPorts:
+ metrics: 9150
+ ## Memcached Prometheus exporter container resource requests and limits
+ ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+ ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
+ ##
+ resourcesPreset: "none"
+ ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
+ ## Example:
+ ## resources:
+ ## requests:
+ ## cpu: 2
+ ## memory: 512Mi
+ ## limits:
+ ## cpu: 3
+ ## memory: 1024Mi
+ ##
+ resources: {}
+ ## Configure Metrics Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context
+ ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
+ ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
+ ## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
+ ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
+ ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged
+ ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
+ ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
+ ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped
+ ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
+ ##
+ containerSecurityContext:
+ enabled: true
+ seLinuxOptions: null
+ runAsUser: 1001
+ runAsGroup: 1001
+ runAsNonRoot: true
+ privileged: false
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop: ["ALL"]
+ seccompProfile:
+ type: "RuntimeDefault"
+ ## Configure extra options for Memcached Prometheus exporter containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param metrics.livenessProbe.enabled Enable livenessProbe on Memcached Prometheus exporter containers
+ ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+ successThreshold: 1
+ ## @param metrics.readinessProbe.enabled Enable readinessProbe on Memcached Prometheus exporter containers
+ ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 3
+ failureThreshold: 3
+ successThreshold: 1
+ ## @param metrics.startupProbe.enabled Enable startupProbe on Memcached Prometheus exporter containers
+ ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param metrics.podAnnotations [object] Memcached Prometheus exporter pod Annotation and Labels
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metrics.containerPorts.metrics }}"
+ ## Service configuration
+ ##
+ service:
+ ## @param metrics.service.ports.metrics Prometheus metrics service port
+ ##
+ ports:
+ metrics: 9150
+ ## @param metrics.service.clusterIP Static clusterIP or None for headless services
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
+ ##
+ clusterIP: ""
+ ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/
+ ##
+ sessionAffinity: None
+ ## @param metrics.service.annotations [object] Annotations for the Prometheus metrics service
+ ##
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}"
+ ## Prometheus Operator ServiceMonitor configuration
+ ##
+ serviceMonitor:
+ ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+ ##
+ enabled: false
+ ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+ ##
+ namespace: ""
+ ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ interval: ""
+ ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ scrapeTimeout: ""
+ ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+ ##
+ labels: {}
+ ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+ ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ selector: {}
+ ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+ ##
+ relabelings: []
+ ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+ ##
+ metricRelabelings: []
+ ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
+ ##
+ jobLabel: ""
diff --git a/charts/openproject/charts/postgresql/.helmignore b/charts/openproject/charts/postgresql/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/openproject/charts/postgresql/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/openproject/charts/postgresql/Chart.lock b/charts/openproject/charts/postgresql/Chart.lock
new file mode 100644
index 0000000..65b5f10
--- /dev/null
+++ b/charts/openproject/charts/postgresql/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ version: 2.12.0
+digest: sha256:bee62139700f032539621dd38fa1d7285f277b91577c55ea26045254d33825ed
+generated: "2023-09-22T17:01:50.438576915Z"
diff --git a/charts/openproject/charts/postgresql/Chart.yaml b/charts/openproject/charts/postgresql/Chart.yaml
new file mode 100644
index 0000000..fccc345
--- /dev/null
+++ b/charts/openproject/charts/postgresql/Chart.yaml
@@ -0,0 +1,37 @@
+annotations:
+ category: Database
+ images: |
+ - name: os-shell
+ image: docker.io/bitnami/os-shell:11-debian-11-r77
+ - name: postgres-exporter
+ image: docker.io/bitnami/postgres-exporter:0.14.0-debian-11-r2
+ - name: postgresql
+ image: docker.io/bitnami/postgresql:15.4.0-debian-11-r45
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 15.4.0
+dependencies:
+- name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ tags:
+ - bitnami-common
+ version: 2.x.x
+description: PostgreSQL (Postgres) is an open source object-relational database known
+ for reliability and data integrity. ACID-compliant, it supports foreign keys, joins,
+ views, triggers and stored procedures.
+home: https://bitnami.com
+icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png
+keywords:
+- postgresql
+- postgres
+- database
+- sql
+- replication
+- cluster
+maintainers:
+- name: VMware, Inc.
+ url: https://github.com/bitnami/charts
+name: postgresql
+sources:
+- https://github.com/bitnami/charts/tree/main/bitnami/postgresql
+version: 12.12.10
diff --git a/charts/openproject/charts/postgresql/README.md b/charts/openproject/charts/postgresql/README.md
new file mode 100644
index 0000000..09cf3fb
--- /dev/null
+++ b/charts/openproject/charts/postgresql/README.md
@@ -0,0 +1,743 @@
+<!--- app-name: PostgreSQL -->
+
+# PostgreSQL packaged by Bitnami
+
+PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures.
+
+[Overview of PostgreSQL](http://www.postgresql.org)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+For HA, please see [this repo](https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha)
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+Looking to use PostgreSQL in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release.
+
+To delete the PVC's associated with `my-release`:
+
+```console
+kubectl delete pvc -l release=my-release
+```
+
+> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it.
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` |
+| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` |
+| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` |
+| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` |
+| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). | `""` |
+| `global.postgresql.auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` |
+
+### Common parameters
+
+| Name | Description | Value |
+| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
+| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
+| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` |
+| `commonLabels` | Add labels to all the deployed resources | `{}` |
+| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` |
+
+### PostgreSQL common parameters
+
+| Name | Description | Value |
+| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
+| `image.registry` | PostgreSQL image registry | `docker.io` |
+| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
+| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.4.0-debian-11-r45` |
+| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify image pull secrets | `[]` |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` |
+| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided | `""` |
+| `auth.username` | Name for a custom user to create | `""` |
+| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` is provided | `""` |
+| `auth.database` | Name for a custom database to create | `""` |
+| `auth.replicationUsername` | Name of the replication user | `repl_user` |
+| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` is provided | `""` |
+| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. | `""` |
+| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` |
+| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` |
+| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `replication-password` |
+| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` |
+| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `containerPorts.postgresql` | PostgreSQL container port | `5432` |
+| `audit.logHostname` | Log client hostnames | `false` |
+| `audit.logConnections` | Add client log-in operations to the log file | `false` |
+| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` |
+| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` |
+| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` |
+| `audit.clientMinMessages` | Message log level to share with the user | `error` |
+| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` |
+| `audit.logTimezone` | Timezone for the log timestamps | `""` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.server` | IP address or name of the LDAP server. | `""` |
+| `ldap.port` | Port number on the LDAP server to connect to | `""` |
+| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` |
+| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` |
+| `ldap.basedn` | Root DN to begin the search for the user in | `""` |
+| `ldap.binddn` | DN of user to bind to LDAP | `""` |
+| `ldap.bindpw` | Password for the user to bind to LDAP | `""` |
+| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` |
+| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` |
+| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` |
+| `ldap.tls.enabled` | Set to true to enable TLS encryption | `false` |
+| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. | `""` |
+| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` |
+| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` |
+| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` |
+| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` |
+| `tls.enabled` | Enable TLS traffic support | `false` |
+| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` |
+| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` |
+| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` |
+| `tls.certFilename` | Certificate filename | `""` |
+| `tls.certKeyFilename` | Certificate key filename | `""` |
+| `tls.certCAFilename` | CA Certificate filename | `""` |
+| `tls.crlFilename` | File containing a Certificate Revocation List | `""` |
+
+### PostgreSQL Primary parameters
+
+| Name | Description | Value |
+| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
+| `primary.name` | Name of the primary database (eg primary, master, leader, ...) | `primary` |
+| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` |
+| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` |
+| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` |
+| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` |
+| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` |
+| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` |
+| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` |
+| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` |
+| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` |
+| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` |
+| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` |
+| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` |
+| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` |
+| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` |
+| `primary.standby.primaryPort` | The Port of replication primary in the other cluster | `""` |
+| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` |
+| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` |
+| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` |
+| `primary.command` | Override default container command (useful when using custom images) | `[]` |
+| `primary.args` | Override default container args (useful when using custom images) | `[]` |
+| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` |
+| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
+| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` |
+| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` |
+| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` |
+| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` |
+| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` |
+| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` |
+| `primary.podSecurityContext.enabled` | Enable security context | `true` |
+| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `primary.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `primary.containerSecurityContext.runAsGroup` | Group ID for the container | `0` |
+| `primary.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot for the container | `true` |
+| `primary.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation for the container | `false` |
+| `primary.containerSecurityContext.seccompProfile.type` | Set seccompProfile.type for the container | `RuntimeDefault` |
+| `primary.containerSecurityContext.capabilities.drop` | Set capabilities.drop for the container | `["ALL"]` |
+| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` |
+| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` |
+| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` |
+| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` |
+| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` |
+| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` |
+| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` |
+| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.nodeAffinityPreset.key` | PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set. | `""` |
+| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` |
+| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` |
+| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` |
+| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` |
+| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` |
+| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` |
+| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` |
+| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` |
+| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` |
+| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` |
+| `primary.service.type` | Kubernetes Service type | `ClusterIP` |
+| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` |
+| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
+| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` |
+| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
+| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
+| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` |
+| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `primary.service.headless.annotations` | Additional custom annotations for headless PostgreSQL primary service | `{}` |
+| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` |
+| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` |
+| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
+| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
+| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` |
+| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
+| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `primary.persistence.annotations` | Annotations for the PVC | `{}` |
+| `primary.persistence.labels` | Labels for the PVC | `{}` |
+| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
+| `primary.persistence.dataSource` | Custom PVC data source | `{}` |
+| `primary.persistentVolumeClaimRetentionPolicy.enabled` | Enable Persistent volume retention policy for Primary Statefulset | `false` |
+| `primary.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` |
+| `primary.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` |
+
+### PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
+
+| Name | Description | Value |
+| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
+| `readReplicas.name` | Name of the read replicas database (eg secondary, slave, ...) | `read` |
+| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` |
+| `readReplicas.extendedConfiguration` | Extended PostgreSQL read only replicas configuration (appended to main or default configuration) | `""` |
+| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` |
+| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` |
+| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` |
+| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` |
+| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` |
+| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` |
+| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
+| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` |
+| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` |
+| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `readReplicas.lifecycleHooks` | for the PostgreSQL read only container to automate configuration before or after startup | `{}` |
+| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` |
+| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` |
+| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` |
+| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` |
+| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `readReplicas.containerSecurityContext.runAsGroup` | Group ID for the container | `0` |
+| `readReplicas.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot for the container | `true` |
+| `readReplicas.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation for the container | `false` |
+| `readReplicas.containerSecurityContext.seccompProfile.type` | Set seccompProfile.type for the container | `RuntimeDefault` |
+| `readReplicas.containerSecurityContext.capabilities.drop` | Set capabilities.drop for the container | `["ALL"]` |
+| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` |
+| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
+| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
+| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` |
+| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` |
+| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.nodeAffinityPreset.key` | PostgreSQL read only node label key to match. Ignored if `primary.affinity` is set. | `""` |
+| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` |
+| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` |
+| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` |
+| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` |
+| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` |
+| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` |
+| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` |
+| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` |
+| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` |
+| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
+| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` |
+| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
+| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
+| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` |
+| `readReplicas.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `readReplicas.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `readReplicas.service.headless.annotations` | Additional custom annotations for headless PostgreSQL read only service | `{}` |
+| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` |
+| `readReplicas.persistence.existingClaim` | Name of an existing PVC to use | `""` |
+| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
+| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
+| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` |
+| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
+| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` |
+| `readReplicas.persistence.labels` | Labels for the PVC | `{}` |
+| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
+| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` |
+| `readReplicas.persistentVolumeClaimRetentionPolicy.enabled` | Enable Persistent volume retention policy for read only Statefulset | `false` |
+| `readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` |
+| `readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` |
+
+### Backup parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `backup.enabled` | Enable the logical dump of the database "regularly" | `false` |
+| `backup.cronjob.schedule` | Set the cronjob parameter schedule | `@daily` |
+| `backup.cronjob.concurrencyPolicy` | Set the cronjob parameter concurrencyPolicy | `Allow` |
+| `backup.cronjob.failedJobsHistoryLimit` | Set the cronjob parameter failedJobsHistoryLimit | `1` |
+| `backup.cronjob.successfulJobsHistoryLimit` | Set the cronjob parameter successfulJobsHistoryLimit | `3` |
+| `backup.cronjob.startingDeadlineSeconds` | Set the cronjob parameter startingDeadlineSeconds | `""` |
+| `backup.cronjob.ttlSecondsAfterFinished` | Set the cronjob parameter ttlSecondsAfterFinished | `""` |
+| `backup.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` |
+| `backup.cronjob.podSecurityContext.enabled` | Enable PodSecurityContext for CronJob/Backup | `true` |
+| `backup.cronjob.podSecurityContext.fsGroup` | Group ID for the CronJob | `1001` |
+| `backup.cronjob.containerSecurityContext.runAsUser` | User ID for the backup container | `1001` |
+| `backup.cronjob.containerSecurityContext.runAsGroup` | Group ID for the backup container | `0` |
+| `backup.cronjob.containerSecurityContext.runAsNonRoot` | Set backup container's Security Context runAsNonRoot | `true` |
+| `backup.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Is the container itself readonly | `true` |
+| `backup.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate backup pod(s) privileges | `false` |
+| `backup.cronjob.containerSecurityContext.seccompProfile.type` | Set backup container's Security Context seccompProfile type | `RuntimeDefault` |
+| `backup.cronjob.containerSecurityContext.capabilities.drop` | Set backup container's Security Context capabilities to drop | `["ALL"]` |
+| `backup.cronjob.command` | Set backup container's command to run | `["/bin/sh","-c","pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump"]` |
+| `backup.cronjob.labels` | Set the cronjob labels | `{}` |
+| `backup.cronjob.annotations` | Set the cronjob annotations | `{}` |
+| `backup.cronjob.nodeSelector` | Node labels for PostgreSQL backup CronJob pod assignment | `{}` |
+| `backup.cronjob.storage.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` |
+| `backup.cronjob.storage.resourcePolicy` | Set it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart is deleted | `""` |
+| `backup.cronjob.storage.storageClass` | PVC Storage Class for the backup data volume | `""` |
+| `backup.cronjob.storage.accessModes` | PV Access Mode | `["ReadWriteOnce"]` |
+| `backup.cronjob.storage.size` | PVC Storage Request for the backup data volume | `8Gi` |
+| `backup.cronjob.storage.annotations` | PVC annotations | `{}` |
+| `backup.cronjob.storage.mountPath` | Path to mount the volume at | `/backup/pgdump` |
+| `backup.cronjob.storage.subPath` | Subdirectory of the volume to mount at | `""` |
+| `backup.cronjob.storage.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. when using local volumes) | `{}` |
+
+### NetworkPolicy parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `networkPolicy.enabled` | Enable network policies | `false` |
+| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` |
+| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` |
+| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `[]` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `[]` |
+| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` |
+| `networkPolicy.egressRules.customRules` | Custom network policy rule | `[]` |
+
+### Volume Permissions parameters
+
+| Name | Description | Value |
+| ---------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r77` |
+| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
+| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
+| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
+| `volumePermissions.containerSecurityContext.runAsGroup` | Group ID for the init container | `0` |
+| `volumePermissions.containerSecurityContext.runAsNonRoot` | runAsNonRoot for the init container | `false` |
+| `volumePermissions.containerSecurityContext.seccompProfile.type` | seccompProfile.type for the init container | `RuntimeDefault` |
+
+### Other Parameters
+
+| Name | Description | Value |
+| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` |
+| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` |
+| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
+| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` |
+| `rbac.rules` | Custom RBAC rules to set | `[]` |
+| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` |
+
+### Metrics Parameters
+
+| Name | Description | Value |
+| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------------- |
+| `metrics.enabled` | Start a prometheus exporter | `false` |
+| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` |
+| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` |
+| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.14.0-debian-11-r2` |
+| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` |
+| `metrics.customMetrics` | Define additional custom metrics | `{}` |
+| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` |
+| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` |
+| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` |
+| `metrics.containerSecurityContext.runAsGroup` | Set PostgreSQL Prometheus exporter containers' Security Context runAsGroup | `0` |
+| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` |
+| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set PostgreSQL Prometheus exporter containers' Security Context allowPrivilegeEscalation | `false` |
+| `metrics.containerSecurityContext.seccompProfile.type` | Set PostgreSQL Prometheus exporter containers' Security Context seccompProfile.type | `RuntimeDefault` |
+| `metrics.containerSecurityContext.capabilities.drop` | Set PostgreSQL Prometheus exporter containers' Security Context capabilities.drop | `["ALL"]` |
+| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` |
+| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` |
+| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` |
+| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
+| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` |
+| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` |
+| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
+| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
+| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release \
+ --set auth.postgresPassword=secretpassword \
+ oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+> **Warning** Setting a password will be ignored on new installation in case when previous PostgreSQL release was deleted through the helm command. In that case, old PVC will have an old password, and setting it through helm won't take effect. Deleting persistent volumes (PVs) will solve the issue. Refer to [issue 2061](https://github.com/bitnami/charts/issues/2061) for more details
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Customizing primary and read replica services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for both the primary and read types individually. This allows you to override the values in the top level service object so that the primary and read can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the primary and read to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the primary.service or readReplicas.service objects will take precedence over the top level service object.
+
+### Use a different PostgreSQL version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/).
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This helm chart also supports customizing the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration`/`readReplicas.extendedConfiguration` parameters as a string. Alternatively, to replace the entire default configuration use `primary.configuration`.
+
+You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string.
+
+In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+
+For example:
+
+- First, create the secret with the certificates files:
+
+ ```console
+ kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
+ ```
+
+- Then, use the following parameters:
+
+ ```console
+ volumePermissions.enabled=true
+ tls.enabled=true
+ tls.certificatesSecret="certificates-tls-secret"
+ tls.certFilename="cert.crt"
+ tls.certKeyFilename="cert.key"
+ ```
+
+ > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL primary
+primary:
+ sidecars:
+ - name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+# For the PostgreSQL replicas
+readReplicas:
+ sidecars:
+ - name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+```
+
+### Metrics
+
+The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar to the approach described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies
+
+```text
+ +--------------+
+ | |
+ +------------+ Chart 1 +-----------+
+ | | | |
+ | --------+------+ |
+ | | |
+ | | |
+ | | |
+ | | |
+ v v v
++-------+------+ +--------+------+ +--------+------+
+| | | | | |
+| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 |
+| | | | | |
++--------------+ +---------------+ +---------------+
+```
+
+The three charts below are dependencies of the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters:
+
+```text
+postgresql.auth.username=testuser
+subchart1.postgresql.auth.username=testuser
+subchart2.postgresql.auth.username=testuser
+postgresql.auth.password=testpass
+subchart1.postgresql.auth.password=testpass
+subchart2.postgresql.auth.password=testpass
+postgresql.auth.database=testdb
+subchart1.postgresql.auth.database=testdb
+subchart2.postgresql.auth.database=testdb
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```text
+global.postgresql.auth.username=testuser
+global.postgresql.auth.password=testpass
+global.postgresql.auth.database=testdb
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, synchronization to standby nodes will fail for all commits; for details, refer to the [code present in the container repository](https://github.com/bitnami/containers/tree/main/bitnami/postgresql). If you need to use that data, please convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift up to 4.10, let OpenShift set the volume permissions, security context, runAsUser and fsGroup automatically, and disable the predefined settings of the helm chart: primary.securityContext.enabled=false,primary.containerSecurityContext.enabled=false,volumePermissions.enabled=false,shmVolume.enabled=false
+- For OpenShift 4.11 and higher, let OpenShift set the runAsUser and fsGroup automatically. Configure the pod and container security context to restrictive defaults and disable the volume permissions setup: primary.
+ podSecurityContext.fsGroup=null,primary.podSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.runAsUser=null,primary.containerSecurityContext.allowPrivilegeEscalation=false,primary.containerSecurityContext.runAsNonRoot=true,primary.containerSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.capabilities.drop=['ALL'],volumePermissions.enabled=false,shmVolume.enabled=false
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 12.0.0
+
+This major version changes the default PostgreSQL image from 14.x to 15.x. Follow the [official instructions](https://www.postgresql.org/docs/15/upgrading.html) to upgrade to 15.x.
+
+### To any previous version
+
+Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/).
+
+## License
+
+Copyright © 2023 VMware, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/charts/openproject/charts/postgresql/charts/common/.helmignore b/charts/openproject/charts/postgresql/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/openproject/charts/postgresql/charts/common/Chart.yaml b/charts/openproject/charts/postgresql/charts/common/Chart.yaml
new file mode 100644
index 0000000..662a6d7
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+ category: Infrastructure
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.12.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+ This chart is not deployable by itself.
+home: https://bitnami.com
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: VMware, Inc.
+ url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+type: library
+version: 2.12.0
diff --git a/charts/openproject/charts/postgresql/charts/common/README.md b/charts/openproject/charts/postgresql/charts/common/README.md
new file mode 100644
index 0000000..fe6a010
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/README.md
@@ -0,0 +1,235 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+ - name: common
+ version: 2.x.x
+ repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.names.fullname" . }}
+data:
+ myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+ type: string
+ description: Docker registry where the image is located
+ example: docker.io
+
+repository:
+ type: string
+ description: Repository and image name
+ example: bitnami/nginx
+
+tag:
+ type: string
+ description: image tag
+ example: 1.16.1-debian-10-r63
+
+pullPolicy:
+ type: string
+ description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+ type: array
+ items:
+ type: string
+ description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+ type: boolean
+ description: Set to true if you would like to see extra information on logs
+ example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+ type: boolean
+ description: Whether enable persistence.
+ example: true
+
+storageClass:
+ type: string
+ description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+ example: "-"
+
+accessMode:
+ type: string
+ description: Access mode for the Persistent Volume Storage.
+ example: ReadWriteOnce
+
+size:
+ type: string
+ description: Size the Persistent Volume Storage.
+ example: 8Gi
+
+path:
+ type: string
+ description: Path to be persisted.
+ example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+ type: string
+ description: Name of the existing secret.
+ example: mySecret
+keyMapping:
+ description: Mapping between the expected key name and the name of the key in the existing secret.
+ type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+# password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+ password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+ env:
+ - name: PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+ key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+ password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+ export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+ export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright © 2023 VMware, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_affinities.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..e85b1df
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_affinities.tpl
@@ -0,0 +1,139 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" . -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $customLabels := default (dict) .customLabels -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: 1
+ {{- range $extraPodAffinityTerms }}
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := .extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: {{ .weight | default 1 -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $customLabels := default (dict) .customLabels -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ {{- range $extraPodAffinityTerms }}
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := .extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_capabilities.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_capabilities.tpl
new file mode 100644
index 0000000..c6d115f
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_capabilities.tpl
@@ -0,0 +1,185 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+ {{- if .Values.global.kubeVersion }}
+ {{- .Values.global.kubeVersion -}}
+ {{- else }}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+ {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for daemonset.
+*/}}
+{{- define "common.capabilities.daemonset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Vertical Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.vpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_errors.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..07ded6f
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_errors.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+ - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+ - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+ {{- $validationErrors := join "" .validationErrors -}}
+ {{- if and $validationErrors .context.Release.IsUpgrade -}}
+ {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+ {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+ {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+ {{- $errorString = print $errorString "\n%s" -}}
+ {{- printf $errorString $validationErrors | fail -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_images.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_images.tpl
new file mode 100644
index 0000000..e248d6d
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_images.tpl
@@ -0,0 +1,101 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+ {{- if .global.imageRegistry }}
+ {{- $registryName = .global.imageRegistry -}}
+ {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+ {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+ {{- printf "%s%s%s" $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+ {{- $pullSecrets := list }}
+
+ {{- if .global }}
+ {{- range .global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+ {{- $pullSecrets := list }}
+ {{- $context := .context }}
+
+ {{- if $context.Values.global }}
+ {{- range $context.Values.global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper image version (ignores image revision/prerelease info & falls back to chart appVersion)
+{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }}
+*/}}
+{{- define "common.images.version" -}}
+{{- $imageTag := .imageRoot.tag | toString -}}
+{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}}
+{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}}
+ {{- $version := semver $imageTag -}}
+ {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}}
+{{- else -}}
+ {{- print .chart.AppVersion -}}
+{{- end -}}
+{{- end -}}
+
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_ingress.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_ingress.tpl
new file mode 100644
index 0000000..efa5b85
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_ingress.tpl
@@ -0,0 +1,73 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+ - serviceName - String. Name of an existing service backend
+ - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+ name: {{ .serviceName }}
+ port:
+ {{- if typeIs "string" .servicePort }}
+ name: {{ .servicePort }}
+ {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+ number: {{ .servicePort | int }}
+ {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_labels.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_labels.tpl
new file mode 100644
index 0000000..a3cdc2b
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_labels.tpl
@@ -0,0 +1,40 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Kubernetes standard labels
+{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}}
+*/}}
+{{- define "common.labels.standard" -}}
+{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
+{{ merge (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) (dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service "app.kubernetes.io/version" .context.Chart.AppVersion) | toYaml }}
+{{- else -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector
+{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}}
+
+We don't want to loop over custom labels appending them to the selector
+since it's very likely that it will break deployments, services, etc.
+However, it's important to overwrite the standard labels if the user
+overwrote them on metadata.labels fields.
+*/}}
+{{- define "common.labels.matchLabels" -}}
+{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
+{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }}
+{{- else -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_names.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_names.tpl
new file mode 100644
index 0000000..a222924
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_names.tpl
@@ -0,0 +1,71 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_secrets.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_secrets.tpl
new file mode 100644
index 0000000..a193c46
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_secrets.tpl
@@ -0,0 +1,172 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+ {{- if not (typeIs "string" .existingSecret) -}}
+ {{- if .existingSecret.keyMapping -}}
+ {{- $key = index .existingSecret.keyMapping $.key -}}
+ {{- end -}}
+ {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - length - int - Optional - Length of the generated random password.
+ - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+ - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+ - context - Context - Required - Parent context.
+ - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets.
+The order in which this function returns a secret password:
+ 1. Already existing 'Secret' resource
+ (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+ 2. Password provided via the values.yaml
+ (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+ 3. Randomly generated secret password
+ (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $failOnNew := default true .failOnNew }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+ {{- if hasKey $secretData .key }}
+ {{- $password = index $secretData .key | quote }}
+ {{- else if $failOnNew }}
+ {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+ {{- end -}}
+{{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+ {{- if .context.Values.enabled }}
+ {{- $subchart = $chartName }}
+ {{- end -}}
+
+ {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+ {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+ {{- $passwordValidationErrors := list $requiredPasswordError -}}
+ {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+ {{- if .strong }}
+ {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+ {{- $password = randAscii $passwordLength }}
+ {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+ {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+ {{- else }}
+ {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+ {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+ {{- $value = index $secretData .key -}}
+{{- else if .defaultValue -}}
+ {{- $value = .defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- if $value -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_storage.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..16405a0
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_storage.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+ {{- if .global.storageClass -}}
+ {{- $storageClass = .global.storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+ {{- if (eq "-" $storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" $storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_tplvalues.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..a8ed763
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,38 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template perhaps with scope if the scope is present.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }}
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }}
+{{- if contains "{{" (toJson .value) }}
+ {{- if .scope }}
+ {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }}
+ {{- else }}
+ {{- tpl $value .context }}
+ {{- end }}
+{{- else }}
+ {{- $value }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Merge a list of values that contains template after rendering them.
+Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge
+Usage:
+{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }}
+*/}}
+{{- define "common.tplvalues.merge" -}}
+{{- $dst := dict -}}
+{{- range .values -}}
+{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}}
+{{- end -}}
+{{ $dst | toYaml }}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_utils.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..bfbddf0
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_utils.tpl
@@ -0,0 +1,77 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+ {{- $fieldNameSplit := splitList "-" .field -}}
+ {{- $upperCaseFieldNameSplit := list -}}
+
+ {{- range $fieldNameSplit -}}
+ {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+ {{- end -}}
+
+ {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+ {{- if not $latestObj -}}
+ {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+ {{- end -}}
+ {{- $value = ( index $latestObj . ) -}}
+ {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+ {{- if $value -}}
+ {{- $key = . }}
+ {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376).
+Usage:
+{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }}
+*/}}
+{{- define "common.utils.checksumTemplate" -}}
+{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}}
+{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/_warnings.tpl b/charts/openproject/charts/postgresql/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..66dffc1
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/_warnings.tpl
@@ -0,0 +1,19 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/validations/_cassandra.tpl b/charts/openproject/charts/postgresql/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..eda9aad
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,77 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+ {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+ {{- $enabled := include "common.cassandra.values.enabled" . -}}
+ {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+ {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.dbUser.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.cassandra.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+ {{- if .subchart -}}
+ cassandra.dbUser
+ {{- else -}}
+ dbUser
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/validations/_mariadb.tpl b/charts/openproject/charts/postgresql/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 0000000..17d83a2
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,108 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+ {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mariadb.values.enabled" . -}}
+ {{- $architecture := include "common.mariadb.values.architecture" . -}}
+ {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mariadb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mariadb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/validations/_mongodb.tpl b/charts/openproject/charts/postgresql/charts/common/templates/validations/_mongodb.tpl
new file mode 100644
index 0000000..bbb445b
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/validations/_mongodb.tpl
@@ -0,0 +1,113 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret"
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+ {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mongodb.values.enabled" . -}}
+ {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+ {{- $architecture := include "common.mongodb.values.architecture" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+ {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+ {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+ {{- if and $valueUsername $valueDatabase -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replicaset") -}}
+ {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mongodb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mongodb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/validations/_mysql.tpl b/charts/openproject/charts/postgresql/charts/common/templates/validations/_mysql.tpl
new file mode 100644
index 0000000..ca3953f
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/validations/_mysql.tpl
@@ -0,0 +1,108 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+ {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mysql.values.enabled" . -}}
+ {{- $architecture := include "common.mysql.values.architecture" . -}}
+ {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mysql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+ {{- if .subchart -}}
+ mysql.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/validations/_postgresql.tpl b/charts/openproject/charts/postgresql/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..8c9aa57
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,134 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+ {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+ {{- $enabled := include "common.postgresql.values.enabled" . -}}
+ {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+ {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+ {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+ {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+ {{- if (eq $enabledReplication "true") -}}
+ {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+ - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+ {{- if .context.Values.global -}}
+ {{- if .context.Values.global.postgresql -}}
+ {{- index .context.Values.global.postgresql .key | quote -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+ {{- if .subchart -}}
+ {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+ {{- else -}}
+ {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+ {{- if not $globalValue -}}
+ {{- if .subchart -}}
+ postgresql.postgresqlPassword
+ {{- else -}}
+ postgresqlPassword
+ {{- end -}}
+ {{- else -}}
+ global.postgresql.postgresqlPassword
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+ {{- else -}}
+ {{- printf "%v" .context.Values.replication.enabled -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+ {{- if .subchart -}}
+ postgresql.replication.password
+ {{- else -}}
+ replication.password
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/validations/_redis.tpl b/charts/openproject/charts/postgresql/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..fc0d208
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,81 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+ {{- $enabled := include "common.redis.values.enabled" . -}}
+ {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+ {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+ {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+ {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+ {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+ {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+ {{- if eq $useAuth "true" -}}
+ {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.redis.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+ {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standarizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+ {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+ {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+ {{- if $standarizedAuthValues -}}
+ {{- true -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/templates/validations/_validations.tpl b/charts/openproject/charts/postgresql/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..31ceda8
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,51 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+ {{- range .required -}}
+ {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+ - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+ {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+ {{- if not $value -}}
+ {{- $varname := "my-value" -}}
+ {{- $getCurrentValue := "" -}}
+ {{- if and .secret .field -}}
+ {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+ {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+ {{- end -}}
+ {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/charts/common/values.yaml b/charts/openproject/charts/postgresql/charts/common/values.yaml
new file mode 100644
index 0000000..9abe0e1
--- /dev/null
+++ b/charts/openproject/charts/postgresql/charts/common/values.yaml
@@ -0,0 +1,8 @@
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/charts/openproject/charts/postgresql/templates/NOTES.txt b/charts/openproject/charts/postgresql/templates/NOTES.txt
new file mode 100644
index 0000000..73c4a34
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/NOTES.txt
@@ -0,0 +1,115 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+ kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+ kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash
+
+In order to replicate the container startup scripts execute this command:
+
+ /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh
+
+{{- else }}
+
+{{- $customUser := include "postgresql.v1.username" . }}
+{{- $postgresPassword := include "common.secrets.lookup" (dict "secret" (include "common.names.fullname" .) "key" .Values.auth.secretKeys.adminPasswordKey "defaultValue" (ternary .Values.auth.postgresPassword .Values.auth.password (eq $customUser "postgres")) "context" $) -}}
+{{- $authEnabled := and (not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret)) (or $postgresPassword .Values.auth.enablePostgresUser (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+{{- if not $authEnabled }}
+
+WARNING: PostgreSQL has been configured without authentication, this is not recommended for production environments.
+{{- end }}
+
+PostgreSQL can be accessed via port {{ include "postgresql.v1.service.port" . }} on the following DNS names from within your cluster:
+
+ {{ include "postgresql.v1.primary.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
+
+{{- if eq .Values.architecture "replication" }}
+
+ {{ include "postgresql.v1.readReplica.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
+
+{{- end }}
+
+{{- if and (not (empty $customUser)) (ne $customUser "postgres") }}
+{{- if .Values.auth.enablePostgresUser }}
+
+To get the password for "postgres" run:
+
+ export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.v1.secretName" . }} -o jsonpath="{.data.{{include "postgresql.v1.adminPasswordKey" .}}}" | base64 -d)
+{{- end }}
+
+To get the password for "{{ $customUser }}" run:
+
+ export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.v1.secretName" . }} -o jsonpath="{.data.{{include "postgresql.v1.userPasswordKey" .}}}" | base64 -d)
+{{- else }}
+{{- if .Values.auth.enablePostgresUser }}
+
+To get the password for "{{ default "postgres" $customUser }}" run:
+
+ export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.v1.secretName" . }} -o jsonpath="{.data.{{ ternary "password" (include "postgresql.v1.adminPasswordKey" .) (and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 -d)
+{{- end }}
+{{- end }}
+
+To connect to your database run the following command:
+ {{- if $authEnabled }}
+
+ kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.v1.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \
+ --command -- psql --host {{ include "postgresql.v1.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }}
+ {{- else }}
+
+ kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.v1.image" . }} \
+ --command -- psql --host {{ include "postgresql.v1.primary.fullname" . }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }}
+ {{- end }}
+
+ > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }} does not exist"
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.primary.service.type }}
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.v1.primary.fullname" . }})
+ {{- if $authEnabled }}
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }}
+ {{- else }}
+ psql --host $NODE_IP --port $NODE_PORT -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }}
+ {{- end }}
+{{- else if contains "LoadBalancer" .Values.primary.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.v1.primary.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.v1.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+ {{- if $authEnabled }}
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.v1.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }}
+ {{- else }}
+ psql --host $SERVICE_IP --port {{ include "postgresql.v1.service.port" . }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }}
+ {{- end }}
+{{- else if contains "ClusterIP" .Values.primary.service.type }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.v1.primary.fullname" . }} {{ include "postgresql.v1.service.port" . }}:{{ include "postgresql.v1.service.port" . }} &
+ {{- if $authEnabled }}
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }}
+ {{- else }}
+ psql --host 127.0.0.1 -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }}
+ {{- end }}
+{{- end }}
+{{- end }}
+
+WARNING: The configured password will be ignored on new installation in case when previous PostgreSQL release was deleted through the helm command. In that case, old PVC will have an old password, and setting it through helm won't take effect. Deleting persistent volumes (PVs) will solve the issue.
+
+{{- include "postgresql.v1.validateValues" . -}}
+{{- include "common.warnings.rollingTag" .Values.image -}}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
diff --git a/charts/openproject/charts/postgresql/templates/_helpers.tpl b/charts/openproject/charts/postgresql/templates/_helpers.tpl
new file mode 100644
index 0000000..2c5c7f9
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/_helpers.tpl
@@ -0,0 +1,406 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL Primary objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.v1.primary.fullname" -}}
+{{- if eq .Values.architecture "replication" -}}
+ {{- printf "%s-%s" (include "common.names.fullname" .) .Values.primary.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+ {{- include "common.names.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL read-only replicas objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.v1.readReplica.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) .Values.readReplicas.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL primary headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.v1.primary.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.v1.primary.fullname" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL read-only replicas headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.v1.readReplica.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.v1.readReplica.fullname" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL image name
+*/}}
+{{- define "postgresql.v1.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL metrics image name
+*/}}
+{{- define "postgresql.v1.metrics.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "postgresql.v1.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "postgresql.v1.imagePullSecrets" -}}
+{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the name for a custom user to create
+*/}}
+{{- define "postgresql.v1.username" -}}
+{{- if .Values.global.postgresql.auth.username -}}
+ {{- .Values.global.postgresql.auth.username -}}
+{{- else -}}
+ {{- .Values.auth.username -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name for a custom database to create
+*/}}
+{{- define "postgresql.v1.database" -}}
+{{- if .Values.global.postgresql.auth.database -}}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.database $) -}}
+{{- else if .Values.auth.database -}}
+ {{- printf "%s" (tpl .Values.auth.database $) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "postgresql.v1.secretName" -}}
+{{- if .Values.global.postgresql.auth.existingSecret -}}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}}
+{{- else if .Values.auth.existingSecret -}}
+ {{- printf "%s" (tpl .Values.auth.existingSecret $) -}}
+{{- else -}}
+ {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the replication-password key.
+*/}}
+{{- define "postgresql.v1.replicationPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret -}}
+ {{- if .Values.global.postgresql.auth.secretKeys.replicationPasswordKey -}}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.replicationPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.replicationPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.replicationPasswordKey $) -}}
+ {{- else -}}
+ {{- "replication-password" -}}
+ {{- end -}}
+{{- else -}}
+ {{- "replication-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the admin-password key.
+*/}}
+{{- define "postgresql.v1.adminPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret -}}
+ {{- if .Values.global.postgresql.auth.secretKeys.adminPasswordKey -}}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.adminPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.adminPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.adminPasswordKey $) -}}
+ {{- end -}}
+{{- else -}}
+ {{- "postgres-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the user-password key.
+*/}}
+{{- define "postgresql.v1.userPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret -}}
+ {{- if or (empty (include "postgresql.v1.username" .)) (eq (include "postgresql.v1.username" .) "postgres") -}}
+ {{- printf "%s" (include "postgresql.v1.adminPasswordKey" .) -}}
+ {{- else -}}
+ {{- if .Values.global.postgresql.auth.secretKeys.userPasswordKey -}}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.userPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.userPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.userPasswordKey $) -}}
+ {{- end -}}
+ {{- end -}}
+{{- else -}}
+ {{- "password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret object should be created
+*/}}
+{{- define "postgresql.v1.createSecret" -}}
+{{- $customUser := include "postgresql.v1.username" . -}}
+{{- $postgresPassword := include "common.secrets.lookup" (dict "secret" (include "common.names.fullname" .) "key" .Values.auth.secretKeys.adminPasswordKey "defaultValue" (ternary (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword .Values.global.postgresql.auth.password .Values.auth.password) (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (or (empty $customUser) (eq $customUser "postgres"))) "context" $) -}}
+{{- if and (not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret)) (or $postgresPassword .Values.auth.enablePostgresUser (and (not (empty $customUser)) (ne $customUser "postgres")) (eq .Values.architecture "replication") (and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw))) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL service port
+*/}}
+{{- define "postgresql.v1.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql -}}
+ {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+ {{- .Values.primary.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL service port
+*/}}
+{{- define "postgresql.v1.readReplica.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql -}}
+ {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+ {{- .Values.readReplicas.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary configuration ConfigMap name.
+*/}}
+{{- define "postgresql.v1.primary.configmapName" -}}
+{{- if .Values.primary.existingConfigmap -}}
+ {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}}
+{{- else -}}
+ {{- printf "%s-configuration" (include "postgresql.v1.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the configuration
+*/}}
+{{- define "postgresql.v1.primary.createConfigmap" -}}
+{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) -}}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.v1.primary.extendedConfigmapName" -}}
+{{- if .Values.primary.existingExtendedConfigmap -}}
+ {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}}
+{{- else -}}
+ {{- printf "%s-extended-configuration" (include "postgresql.v1.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL read replica extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.v1.readReplicas.extendedConfigmapName" -}}
+ {{- printf "%s-extended-configuration" (include "postgresql.v1.readReplica.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the extended configuration
+*/}}
+{{- define "postgresql.v1.primary.createExtendedConfigmap" -}}
+{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) -}}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL read replica with the extended configuration
+*/}}
+{{- define "postgresql.v1.readReplicas.createExtendedConfigmap" -}}
+{{- if .Values.readReplicas.extendedConfiguration -}}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "postgresql.v1.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap should be mounted with PostgreSQL configuration
+*/}}
+{{- define "postgresql.v1.mountConfigurationCM" -}}
+{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts ConfigMap name.
+*/}}
+{{- define "postgresql.v1.initdb.scriptsCM" -}}
+{{- if .Values.primary.initdb.scriptsConfigMap -}}
+ {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}}
+{{- else -}}
+ {{- printf "%s-init-scripts" (include "postgresql.v1.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if TLS is enabled for LDAP connection
+*/}}
+{{- define "postgresql.v1.ldap.tls.enabled" -}}
+{{- if and (kindIs "string" .Values.ldap.tls) (not (empty .Values.ldap.tls)) -}}
+ {{- true -}}
+{{- else if and (kindIs "map" .Values.ldap.tls) .Values.ldap.tls.enabled -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the readiness probe command
+*/}}
+{{- define "postgresql.v1.readinessProbeCommand" -}}
+{{- $customUser := include "postgresql.v1.username" . -}}
+- |
+{{- if (include "postgresql.v1.database" .) }}
+ exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- else }}
+ exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- end }}
+{{- if contains "bitnami/" .Values.image.repository }}
+ [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
+{{- end }}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "postgresql.v1.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "postgresql.v1.validateValues.ldapConfigurationMethod" .) -}}
+{{- $messages := append $messages (include "postgresql.v1.validateValues.psp" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap
+*/}}
+{{- define "postgresql.v1.validateValues.ldapConfigurationMethod" -}}
+{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) -}}
+postgresql: ldap.url, ldap.server
+ You cannot set both `ldap.url` and `ldap.server` at the same time.
+ Please provide a unique way to configure LDAP.
+ More info at https://www.postgresql.org/docs/current/auth-ldap.html
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If PSP is enabled RBAC should be enabled too
+*/}}
+{{- define "postgresql.v1.validateValues.psp" -}}
+{{- if and .Values.psp.create (not .Values.rbac.create) -}}
+postgresql: psp.create, rbac.create
+ RBAC should be enabled if PSP is enabled in order for PSP to work.
+ More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "postgresql.v1.tlsCert" -}}
+{{- if .Values.tls.autoGenerated -}}
+ {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}}
+{{- else -}}
+ {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "postgresql.v1.tlsCertKey" -}}
+{{- if .Values.tls.autoGenerated -}}
+ {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}}
+{{- else -}}
+{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.v1.tlsCACert" -}}
+{{- if .Values.tls.autoGenerated -}}
+ {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}}
+{{- else -}}
+ {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CRL file.
+*/}}
+{{- define "postgresql.v1.tlsCRL" -}}
+{{- if .Values.tls.crlFilename -}}
+{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "postgresql.v1.createTlsSecret" -}}
+{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.v1.tlsSecretName" -}}
+{{- if .Values.tls.autoGenerated -}}
+ {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- else -}}
+ {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/openproject/charts/postgresql/templates/backup/cronjob.yaml b/charts/openproject/charts/postgresql/templates/backup/cronjob.yaml
new file mode 100644
index 0000000..72d1bf2
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/backup/cronjob.yaml
@@ -0,0 +1,113 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.backup.enabled }}
+{{- $customUser := include "postgresql.v1.username" . }}
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall
+ namespace: {{ .Release.Namespace | quote }}
+ {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.labels .Values.commonLabels ) "context" . ) }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: pg_dumpall
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.annotations .Values.commonAnnotations ) "context" . ) }}
+ {{- if $annotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ schedule: {{ quote .Values.backup.cronjob.schedule }}
+ concurrencyPolicy: {{ .Values.backup.cronjob.concurrencyPolicy }}
+ failedJobsHistoryLimit: {{ .Values.backup.cronjob.failedJobsHistoryLimit }}
+ successfulJobsHistoryLimit: {{ .Values.backup.cronjob.successfulJobsHistoryLimit }}
+ {{- if .Values.backup.cronjob.startingDeadlineSeconds }}
+ startingDeadlineSeconds: {{ .Values.backup.cronjob.startingDeadlineSeconds }}
+ {{- end }}
+ jobTemplate:
+ spec:
+ {{- if .Values.backup.cronjob.ttlSecondsAfterFinished }}
+ ttlSecondsAfterFinished: {{ .Values.backup.cronjob.ttlSecondsAfterFinished }}
+ {{- end }}
+ template:
+ metadata:
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 12 }}
+ app.kubernetes.io/component: pg_dumpall
+ {{- if $annotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 12 }}
+ {{- end }}
+ spec:
+ {{- include "postgresql.v1.imagePullSecrets" . | nindent 10 }}
+ {{- if .Values.backup.cronjob.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.nodeSelector "context" $) | nindent 12 }}
+ {{- end }}
+ containers:
+ - name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall
+ image: {{ include "postgresql.v1.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ env:
+ - name: PGUSER
+ {{- if .Values.auth.enablePostgresUser }}
+ value: postgres
+ {{- else }}
+ value: {{ $customUser | quote }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: PGPASSFILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }}
+ {{- else }}
+ - name: PGPASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.adminPasswordKey" . }}
+ {{- end }}
+ - name: PGHOST
+ value: {{ include "postgresql.v1.primary.fullname" . }}
+ - name: PGPORT
+ value: {{ include "postgresql.v1.service.port" . | quote }}
+ - name: PGDUMP_DIR
+ value: {{ .Values.backup.cronjob.storage.mountPath }}
+ {{- if .Values.tls.enabled }}
+ - name: PGSSLROOTCERT
+ {{- if .Values.tls.autoGenerated }}
+ value: /tmp/certs/ca.crt
+ {{- else }}
+ value: {{ printf "/tmp/certs/%s" .Values.tls.certCAFilename }}
+ {{- end }}
+ {{- end }}
+ command:
+ {{- range .Values.backup.cronjob.command }}
+ - {{ . }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.tls.enabled }}
+ - name: certs
+ mountPath: /tmp/certs
+ {{- end }}
+ - name: datadir
+ mountPath: {{ .Values.backup.cronjob.storage.mountPath }}
+ subPath: {{ .Values.backup.cronjob.storage.subPath }}
+ securityContext:
+ {{- include "common.tplvalues.render" ( dict "value" .Values.backup.cronjob.containerSecurityContext "context" $) | nindent 14 }}
+ restartPolicy: {{ .Values.backup.cronjob.restartPolicy }}
+ {{- if .Values.backup.cronjob.podSecurityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.backup.cronjob.podSecurityContext.fsGroup }}
+ {{- end }}
+ volumes:
+ {{- if .Values.tls.enabled }}
+ - name: certs
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.backup.cronjob.storage.existingClaim }}
+ - name: datadir
+ persistentVolumeClaim:
+ claimName: {{ printf "%s" (tpl .Values.backup.cronjob.storage.existingClaim .) }}
+ {{- else }}
+ - name: datadir
+ persistentVolumeClaim:
+ claimName: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/backup/pvc.yaml b/charts/openproject/charts/postgresql/templates/backup/pvc.yaml
new file mode 100644
index 0000000..6fe9cbf
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/backup/pvc.yaml
@@ -0,0 +1,34 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.backup.enabled (not .Values.backup.cronjob.storage.existingClaim) -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall
+ namespace: {{ .Release.Namespace | quote }}
+ {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.labels .Values.commonLabels ) "context" . ) }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: pg_dumpall
+ {{- if or .Values.backup.cronjob.annotations .Values.commonAnnotations .Values.backup.cronjob.storage.resourcePolicy }}
+ annotations:
+ {{- if or .Values.backup.cronjob.annotations .Values.commonAnnotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.annotations .Values.commonAnnotations ) "context" . ) }}
+ {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.backup.cronjob.storage.resourcePolicy }}
+ helm.sh/resource-policy: {{ .Values.backup.cronjob.storage.resourcePolicy | quote }}
+ {{- end }}
+ {{- end }}
+spec:
+ accessModes:
+ {{- range .Values.backup.cronjob.storage.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.backup.cronjob.storage.size | quote }}
+ {{ include "common.storage.class" (dict "persistence" .Values.backup.cronjob.storage "global" .Values.global) }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/extra-list.yaml b/charts/openproject/charts/postgresql/templates/extra-list.yaml
new file mode 100644
index 0000000..2d35a58
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/extra-list.yaml
@@ -0,0 +1,9 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/networkpolicy-egress.yaml b/charts/openproject/charts/postgresql/templates/networkpolicy-egress.yaml
new file mode 100644
index 0000000..b67817c
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/networkpolicy-egress.yaml
@@ -0,0 +1,34 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.egressRules.denyConnectionsToExternal .Values.networkPolicy.egressRules.customRules) }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-egress" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
+ policyTypes:
+ - Egress
+ egress:
+ {{- if .Values.networkPolicy.egressRules.denyConnectionsToExternal }}
+ - ports:
+ - port: 53
+ protocol: UDP
+ - port: 53
+ protocol: TCP
+ - to:
+ - namespaceSelector: {}
+ {{- end }}
+ {{- if .Values.networkPolicy.egressRules.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/configmap.yaml b/charts/openproject/charts/postgresql/templates/primary/configmap.yaml
new file mode 100644
index 0000000..7bb8b7f
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/configmap.yaml
@@ -0,0 +1,26 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if (include "postgresql.v1.primary.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-configuration" (include "postgresql.v1.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ {{- if .Values.primary.configuration }}
+ postgresql.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.pgHbaConfiguration }}
+ pg_hba.conf: |
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/extended-configmap.yaml b/charts/openproject/charts/postgresql/templates/primary/extended-configmap.yaml
new file mode 100644
index 0000000..456f8ee
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/extended-configmap.yaml
@@ -0,0 +1,20 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if (include "postgresql.v1.primary.createExtendedConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-extended-configuration" (include "postgresql.v1.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ override.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/initialization-configmap.yaml b/charts/openproject/charts/postgresql/templates/primary/initialization-configmap.yaml
new file mode 100644
index 0000000..80d804a
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/initialization-configmap.yaml
@@ -0,0 +1,17 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-init-scripts" (include "postgresql.v1.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) | nindent 2 }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/metrics-configmap.yaml b/charts/openproject/charts/postgresql/templates/primary/metrics-configmap.yaml
new file mode 100644
index 0000000..7da2bcd
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/metrics-configmap.yaml
@@ -0,0 +1,18 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.v1.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/metrics-svc.yaml b/charts/openproject/charts/postgresql/templates/primary/metrics-svc.yaml
new file mode 100644
index 0000000..3d94510
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/metrics-svc.yaml
@@ -0,0 +1,31 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.v1.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if or .Values.commonAnnotations .Values.metrics.service.annotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+ {{- if .Values.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.metrics.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ port: {{ .Values.metrics.service.ports.metrics }}
+ targetPort: http-metrics
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }}
+ selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: primary
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/networkpolicy.yaml b/charts/openproject/charts/postgresql/templates/primary/networkpolicy.yaml
new file mode 100644
index 0000000..9da3fb4
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/networkpolicy.yaml
@@ -0,0 +1,61 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.metrics.enabled .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled) }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-ingress" (include "postgresql.v1.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- $primaryPodLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }}
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $primaryPodLabels "context" $ ) | nindent 6 }}
+ app.kubernetes.io/component: primary
+ ingress:
+ {{- if and .Values.metrics.enabled .Values.networkPolicy.metrics.enabled (or .Values.networkPolicy.metrics.namespaceSelector .Values.networkPolicy.metrics.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.metrics.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.metrics.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (eq .Values.architecture "replication") }}
+ - from:
+ {{- $readPodLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }}
+ - podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $readPodLabels "context" $ ) | nindent 14 }}
+ app.kubernetes.io/component: read
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/servicemonitor.yaml b/charts/openproject/charts/postgresql/templates/primary/servicemonitor.yaml
new file mode 100644
index 0000000..05d54f3
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/servicemonitor.yaml
@@ -0,0 +1,46 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "postgresql.v1.primary.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+ {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.metrics.serviceMonitor.jobLabel }}
+ jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+ {{- end }}
+ selector:
+ {{- $svcLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.selector .Values.commonLabels ) "context" . ) }}
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $svcLabels "context" $ ) | nindent 6 }}
+ app.kubernetes.io/component: metrics
+ endpoints:
+ - port: http-metrics
+ {{- if .Values.metrics.serviceMonitor.interval }}
+ interval: {{ .Values.metrics.serviceMonitor.interval }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+ scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.relabelings }}
+ relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.honorLabels }}
+ honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+ {{- end }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/statefulset.yaml b/charts/openproject/charts/postgresql/templates/primary/statefulset.yaml
new file mode 100644
index 0000000..8f8d56d
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/statefulset.yaml
@@ -0,0 +1,653 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- $customUser := include "postgresql.v1.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ include "postgresql.v1.primary.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.labels .Values.commonLabels ) "context" . ) }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if or .Values.commonAnnotations .Values.primary.annotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: 1
+ serviceName: {{ include "postgresql.v1.primary.svc.headless" . }}
+ {{- if .Values.primary.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }}
+ {{- end }}
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
+ app.kubernetes.io/component: primary
+ template:
+ metadata:
+ name: {{ include "postgresql.v1.primary.fullname" . }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }}
+ app.kubernetes.io/component: primary
+ {{- if or (include "postgresql.v1.primary.createConfigmap" .) (include "postgresql.v1.primary.createExtendedConfigmap" .) .Values.primary.podAnnotations }}
+ annotations:
+ {{- if (include "postgresql.v1.primary.createConfigmap" .) }}
+ checksum/configuration: {{ pick (include (print $.Template.BasePath "/primary/configmap.yaml") . | fromYaml) "data" | toYaml | sha256sum }}
+ {{- end }}
+ {{- if (include "postgresql.v1.primary.createExtendedConfigmap" .) }}
+ checksum/extended-configuration: {{ pick (include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | fromYaml) "data" | toYaml | sha256sum }}
+ {{- end }}
+ {{- if .Values.primary.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ spec:
+ {{- if .Values.primary.extraPodSpec }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "postgresql.v1.serviceAccountName" . }}
+ {{- include "postgresql.v1.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.primary.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "customLabels" $podLabels "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "customLabels" $podLabels "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.primary.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.priorityClassName }}
+ priorityClassName: {{ .Values.primary.priorityClassName }}
+ {{- end }}
+ {{- if .Values.primary.schedulerName }}
+ schedulerName: {{ .Values.primary.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.primary.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if .Values.primary.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ hostNetwork: {{ .Values.primary.hostNetwork }}
+ hostIPC: {{ .Values.primary.hostIPC }}
+ {{- if or (and .Values.tls.enabled (not .Values.volumePermissions.enabled)) (and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled)) .Values.primary.initContainers }}
+ initContainers:
+ {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+ - name: copy-certs
+ image: {{ include "postgresql.v1.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.primary.resources }}
+ resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+ {{- end }}
+ # We don't require a privileged container in this case
+ {{- if .Values.primary.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ chmod 600 {{ include "postgresql.v1.tlsCertKey" . }}
+ volumeMounts:
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }}
+ - name: init-chmod-data
+ image: {{ include "postgresql.v1.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ {{- if .Values.primary.persistence.enabled }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }}
+ {{- else }}
+ chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }}
+ {{- end }}
+ mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+ chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+ find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.v1.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+ {{- else }}
+ xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ chmod -R 777 /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+ {{- else }}
+ chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+ {{- end }}
+ chmod 600 {{ include "postgresql.v1.tlsCertKey" . }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.primary.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.primary.persistence.mountPath }}
+ {{- if .Values.primary.persistence.subPath }}
+ subPath: {{ .Values.primary.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.initContainers }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.initContainers "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: postgresql
+ image: {{ include "postgresql.v1.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.primary.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.primary.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.primary.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: POSTGRESQL_PORT_NUMBER
+ value: {{ .Values.containerPorts.postgresql | quote }}
+ - name: POSTGRESQL_VOLUME_DIR
+ value: {{ .Values.primary.persistence.mountPath | quote }}
+ {{- if .Values.primary.persistence.mountPath }}
+ - name: PGDATA
+ value: {{ .Values.postgresqlDataDir | quote }}
+ {{- end }}
+ # Authentication
+ {{- if or (eq $customUser "postgres") (empty $customUser) }}
+ {{- if .Values.auth.enablePostgresUser }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.adminPasswordKey" . }}
+ {{- end }}
+ {{- else }}
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "true"
+ {{- end }}
+ {{- else }}
+ - name: POSTGRES_USER
+ value: {{ $customUser | quote }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.userPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.userPasswordKey" . }}
+ {{- end }}
+ {{- if .Values.auth.enablePostgresUser }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.adminPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if (include "postgresql.v1.database" .) }}
+ - name: POSTGRES_DATABASE
+ value: {{ (include "postgresql.v1.database" .) | quote }}
+ {{- end }}
+ # Replication
+ {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }}
+ - name: POSTGRES_REPLICATION_MODE
+ value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }}
+ - name: POSTGRES_REPLICATION_USER
+ value: {{ .Values.auth.replicationUsername | quote }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_REPLICATION_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.replicationPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.replicationPasswordKey" . }}
+ {{- end }}
+ {{- if ne .Values.replication.synchronousCommit "off" }}
+ - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE
+ value: {{ .Values.replication.synchronousCommit | quote }}
+ - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS
+ value: {{ .Values.replication.numSynchronousReplicas | quote }}
+ {{- end }}
+ - name: POSTGRES_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ {{- end }}
+ # Initdb
+ {{- if .Values.primary.initdb.args }}
+ - name: POSTGRES_INITDB_ARGS
+ value: {{ .Values.primary.initdb.args | quote }}
+ {{- end }}
+ {{- if .Values.primary.initdb.postgresqlWalDir }}
+ - name: POSTGRES_INITDB_WALDIR
+ value: {{ .Values.primary.initdb.postgresqlWalDir | quote }}
+ {{- end }}
+ {{- if .Values.primary.initdb.user }}
+ - name: POSTGRES_INITSCRIPTS_USERNAME
+ value: {{ .Values.primary.initdb.user }}
+ {{- end }}
+ {{- if .Values.primary.initdb.password }}
+ - name: POSTGRES_INITSCRIPTS_PASSWORD
+ value: {{ .Values.primary.initdb.password | quote }}
+ {{- end }}
+ # Standby
+ {{- if .Values.primary.standby.enabled }}
+ - name: POSTGRES_MASTER_HOST
+ value: {{ .Values.primary.standby.primaryHost }}
+ - name: POSTGRES_MASTER_PORT_NUMBER
+ value: {{ .Values.primary.standby.primaryPort | quote }}
+ {{- end }}
+ # LDAP
+ - name: POSTGRESQL_ENABLE_LDAP
+ value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }}
+ {{- if .Values.ldap.enabled }}
+ {{- if or .Values.ldap.url .Values.ldap.uri }}
+ - name: POSTGRESQL_LDAP_URL
+ value: {{ coalesce .Values.ldap.url .Values.ldap.uri }}
+ {{- else }}
+ - name: POSTGRESQL_LDAP_SERVER
+ value: {{ .Values.ldap.server }}
+ - name: POSTGRESQL_LDAP_PORT
+ value: {{ .Values.ldap.port | quote }}
+ - name: POSTGRESQL_LDAP_SCHEME
+ value: {{ .Values.ldap.scheme }}
+ {{- if (include "postgresql.v1.ldap.tls.enabled" .) }}
+ - name: POSTGRESQL_LDAP_TLS
+ value: "1"
+ {{- end }}
+ - name: POSTGRESQL_LDAP_PREFIX
+ value: {{ .Values.ldap.prefix | quote }}
+ - name: POSTGRESQL_LDAP_SUFFIX
+ value: {{ .Values.ldap.suffix | quote }}
+ - name: POSTGRESQL_LDAP_BASE_DN
+ value: {{ coalesce .Values.ldap.baseDN .Values.ldap.basedn }}
+ - name: POSTGRESQL_LDAP_BIND_DN
+ value: {{ coalesce .Values.ldap.bindDN .Values.ldap.binddn }}
+ {{- if or (not (empty .Values.ldap.bind_password)) (not (empty .Values.ldap.bindpw)) }}
+ - name: POSTGRESQL_LDAP_BIND_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: ldap-password
+ {{- end }}
+ - name: POSTGRESQL_LDAP_SEARCH_ATTR
+ value: {{ coalesce .Values.ldap.search_attr .Values.ldap.searchAttribute }}
+ - name: POSTGRESQL_LDAP_SEARCH_FILTER
+ value: {{ coalesce .Values.ldap.search_filter .Values.ldap.searchFilter }}
+ {{- end }}
+ {{- end }}
+ # TLS
+ - name: POSTGRESQL_ENABLE_TLS
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+ value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+ - name: POSTGRESQL_TLS_CERT_FILE
+ value: {{ include "postgresql.v1.tlsCert" . }}
+ - name: POSTGRESQL_TLS_KEY_FILE
+ value: {{ include "postgresql.v1.tlsCertKey" . }}
+ {{- if .Values.tls.certCAFilename }}
+ - name: POSTGRESQL_TLS_CA_FILE
+ value: {{ include "postgresql.v1.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.tls.crlFilename }}
+ - name: POSTGRESQL_TLS_CRL_FILE
+ value: {{ include "postgresql.v1.tlsCRL" . }}
+ {{- end }}
+ {{- end }}
+ # Audit
+ - name: POSTGRESQL_LOG_HOSTNAME
+ value: {{ .Values.audit.logHostname | quote }}
+ - name: POSTGRESQL_LOG_CONNECTIONS
+ value: {{ .Values.audit.logConnections | quote }}
+ - name: POSTGRESQL_LOG_DISCONNECTIONS
+ value: {{ .Values.audit.logDisconnections | quote }}
+ {{- if .Values.audit.logLinePrefix }}
+ - name: POSTGRESQL_LOG_LINE_PREFIX
+ value: {{ .Values.audit.logLinePrefix | quote }}
+ {{- end }}
+ {{- if .Values.audit.logTimezone }}
+ - name: POSTGRESQL_LOG_TIMEZONE
+ value: {{ .Values.audit.logTimezone | quote }}
+ {{- end }}
+ {{- if .Values.audit.pgAuditLog }}
+ - name: POSTGRESQL_PGAUDIT_LOG
+ value: {{ .Values.audit.pgAuditLog | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+ value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+ # Others
+ - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+ value: {{ .Values.audit.clientMinMessages | quote }}
+ - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+ value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+ {{- if .Values.primary.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.primary.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.primary.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.primary.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.primary.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ containerPort: {{ .Values.containerPorts.postgresql }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.primary.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.v1.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.v1.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - -e
+ {{- include "postgresql.v1.readinessProbeCommand" . | nindent 16 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.resources }}
+ resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.primary.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d/
+ {{- end }}
+ {{- if .Values.primary.initdb.scriptsSecret }}
+ - name: custom-init-scripts-secret
+ mountPath: /docker-entrypoint-initdb.d/secret
+ {{- end }}
+ {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+ - name: postgresql-extended-config
+ mountPath: {{ .Values.primary.persistence.mountPath }}/conf/conf.d/
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.primary.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.primary.persistence.mountPath }}
+ {{- if .Values.primary.persistence.subPath }}
+ subPath: {{ .Values.primary.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ - name: postgresql-config
+ mountPath: {{ .Values.primary.persistence.mountPath }}/conf
+ {{- end }}
+ {{- if .Values.primary.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ include "postgresql.v1.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.customMetrics }}
+ args: ["--extend.query-path", "/conf/custom-metrics.yaml"]
+ {{- end }}
+ env:
+ {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.v1.database" .) }}
+ - name: DATA_SOURCE_URI
+ value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.v1.service.port" .)) $database }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: DATA_SOURCE_PASS_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.userPasswordKey" .) }}
+ {{- else }}
+ - name: DATA_SOURCE_PASS
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.userPasswordKey" . }}
+ {{- end }}
+ - name: DATA_SOURCE_USER
+ value: {{ default "postgres" $customUser | quote }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ containerPort: {{ .Values.metrics.containerPorts.metrics }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ mountPath: /conf
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }}
+ {{- end }}
+ volumes:
+ {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ - name: postgresql-config
+ configMap:
+ name: {{ include "postgresql.v1.primary.configmapName" . }}
+ {{- end }}
+ {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ include "postgresql.v1.primary.extendedConfigmapName" . }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ include "postgresql.v1.secretName" . }}
+ {{- end }}
+ {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+ - name: custom-init-scripts
+ configMap:
+ name: {{ include "postgresql.v1.initdb.scriptsCM" . }}
+ {{- end }}
+ {{- if .Values.primary.initdb.scriptsSecret }}
+ - name: custom-init-scripts-secret
+ secret:
+ secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ secret:
+ secretName: {{ include "postgresql.v1.tlsSecretName" . }}
+ - name: postgresql-certificates
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.primary.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ configMap:
+ name: {{ printf "%s-metrics" (include "postgresql.v1.primary.fullname" .) }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ emptyDir:
+ medium: Memory
+ {{- if .Values.shmVolume.sizeLimit }}
+ sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+ {{- end }}
+ {{- end }}
+ {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ tpl .Values.primary.persistence.existingClaim $ }}
+ {{- else if not .Values.primary.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- else }}
+ {{- if .Values.primary.persistentVolumeClaimRetentionPolicy.enabled }}
+ persistentVolumeClaimRetentionPolicy:
+ whenDeleted: {{ .Values.primary.persistentVolumeClaimRetentionPolicy.whenDeleted }}
+ whenScaled: {{ .Values.primary.persistentVolumeClaimRetentionPolicy.whenScaled }}
+ {{- end }}
+ volumeClaimTemplates:
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: data
+ {{- if .Values.primary.persistence.annotations }}
+ annotations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.primary.persistence.labels }}
+ labels: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.labels "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.primary.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if .Values.primary.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.primary.persistence.size | quote }}
+ {{- if .Values.primary.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/primary/svc-headless.yaml b/charts/openproject/charts/postgresql/templates/primary/svc-headless.yaml
new file mode 100644
index 0000000..b18565a
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/svc-headless.yaml
@@ -0,0 +1,36 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.v1.primary.svc.headless" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: primary
+ annotations:
+ {{- if or .Values.primary.service.headless.annotations .Values.commonAnnotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.service.headless.annotations .Values.commonAnnotations ) "context" . ) }}
+ {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+ # Use this annotation in addition to the actual publishNotReadyAddresses
+ # field below because the annotation will stop being respected soon but the
+ # field is broken in some versions of Kubernetes:
+ # https://github.com/kubernetes/kubernetes/issues/58662
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ type: ClusterIP
+ clusterIP: None
+ # We want all pods in the StatefulSet to have their addresses published for
+ # the sake of the other Postgresql pods even before they're ready, since they
+ # have to be able to talk to each other in order to become ready.
+ publishNotReadyAddresses: true
+ ports:
+ - name: tcp-postgresql
+ port: {{ template "postgresql.v1.service.port" . }}
+ targetPort: tcp-postgresql
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }}
+ selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: primary
diff --git a/charts/openproject/charts/postgresql/templates/primary/svc.yaml b/charts/openproject/charts/postgresql/templates/primary/svc.yaml
new file mode 100644
index 0000000..90f7e46
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/primary/svc.yaml
@@ -0,0 +1,57 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+Service fronting the PostgreSQL primary. Type, external traffic policy,
+load-balancer settings, clusterIP, session affinity, node port and any
+extra ports are all driven by .Values.primary.service.
+*/}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.v1.primary.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: primary
+  {{- if or .Values.commonAnnotations .Values.primary.service.annotations }}
+  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.service.annotations .Values.commonAnnotations ) "context" . ) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+spec:
+  type: {{ .Values.primary.service.type }}
+  {{- if or (eq .Values.primary.service.type "LoadBalancer") (eq .Values.primary.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerSourceRanges)) }}
+  loadBalancerSourceRanges: {{ .Values.primary.service.loadBalancerSourceRanges | toJson}}
+  {{- end }}
+  {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }}
+  {{- end }}
+  {{- if and .Values.primary.service.clusterIP (eq .Values.primary.service.type "ClusterIP") }}
+  clusterIP: {{ .Values.primary.service.clusterIP }}
+  {{- end }}
+  {{- if .Values.primary.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.primary.service.sessionAffinity }}
+  {{- end }}
+  {{- if .Values.primary.service.sessionAffinityConfig }}
+  sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.sessionAffinityConfig "context" $) | nindent 4 }}
+  {{- end }}
+  ports:
+    - name: tcp-postgresql
+      port: {{ template "postgresql.v1.service.port" . }}
+      targetPort: tcp-postgresql
+      {{- /* NodePort/LoadBalancer take the configured nodePort; ClusterIP renders an explicit null so no nodePort is emitted. */}}
+      {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }}
+      nodePort: {{ .Values.primary.service.nodePorts.postgresql }}
+      {{- else if eq .Values.primary.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  {{- if .Values.primary.service.extraPorts }}
+  {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }}
+  {{- end }}
+  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }}
+  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: primary
diff --git a/charts/openproject/charts/postgresql/templates/prometheusrule.yaml b/charts/openproject/charts/postgresql/templates/prometheusrule.yaml
new file mode 100644
index 0000000..6cdb087
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/prometheusrule.yaml
@@ -0,0 +1,27 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+PrometheusRule (Prometheus Operator CRD) carrying the user-supplied
+rules from .Values.metrics.prometheusRule.rules; rendered only when
+both metrics and metrics.prometheusRule are enabled.
+*/}}
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }}
+  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.prometheusRule.labels .Values.commonLabels ) "context" . ) }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: metrics
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  groups:
+    - name: {{ include "common.names.fullname" . }}
+      rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/psp.yaml b/charts/openproject/charts/postgresql/templates/psp.yaml
new file mode 100644
index 0000000..f6bf59f
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/psp.yaml
@@ -0,0 +1,48 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+PodSecurityPolicy for the chart's pods; rendered only when
+.Values.psp.create is set and the target cluster is below
+Kubernetes 1.25, where the policy/v1beta1 PSP API was removed.
+*/}}
+{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- if and $pspAvailable .Values.psp.create }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  privileged: false
+  volumes:
+    - 'configMap'
+    - 'secret'
+    - 'persistentVolumeClaim'
+    - 'emptyDir'
+    - 'projected'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/extended-configmap.yaml b/charts/openproject/charts/postgresql/templates/read/extended-configmap.yaml
new file mode 100644
index 0000000..efa87bb
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/extended-configmap.yaml
@@ -0,0 +1,25 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+ConfigMap exposing .Values.readReplicas.extendedConfiguration under the
+"override.conf" key for the read replicas; rendered only when the helper
+postgresql.v1.readReplicas.createExtendedConfigmap evaluates truthy.
+*/}}
+{{- if (include "postgresql.v1.readReplicas.createExtendedConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-extended-configuration" (include "postgresql.v1.readReplica.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: read
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  override.conf: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extendedConfiguration "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/metrics-configmap.yaml b/charts/openproject/charts/postgresql/templates/read/metrics-configmap.yaml
new file mode 100644
index 0000000..a1e06bf
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/metrics-configmap.yaml
@@ -0,0 +1,23 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+ConfigMap carrying .Values.metrics.customMetrics (serialized to YAML
+under the "custom-metrics.yaml" key) for the read replicas; rendered
+only when metrics are enabled, customMetrics is set and the
+architecture is "replication".
+*/}}
+{{- if and .Values.metrics.enabled .Values.metrics.customMetrics (eq .Values.architecture "replication") }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-metrics" (include "postgresql.v1.readReplica.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/metrics-svc.yaml b/charts/openproject/charts/postgresql/templates/read/metrics-svc.yaml
new file mode 100644
index 0000000..e9f13e0
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/metrics-svc.yaml
@@ -0,0 +1,36 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+ClusterIP service exposing the "http-metrics" port of the read replica
+pods; rendered only when metrics are enabled and the architecture is
+"replication".
+*/}}
+{{- if and .Values.metrics.enabled (eq .Values.architecture "replication") }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-metrics" (include "postgresql.v1.readReplica.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: metrics-read
+  {{- if or .Values.commonAnnotations .Values.metrics.service.annotations }}
+  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+  {{- if .Values.metrics.service.clusterIP }}
+  clusterIP: {{ .Values.metrics.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: http-metrics
+      port: {{ .Values.metrics.service.ports.metrics }}
+      targetPort: http-metrics
+  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }}
+  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/networkpolicy.yaml b/charts/openproject/charts/postgresql/templates/read/networkpolicy.yaml
new file mode 100644
index 0000000..79d3a5a
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/networkpolicy.yaml
@@ -0,0 +1,46 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+Ingress NetworkPolicy for the read replica pods; rendered only for the
+"replication" architecture when ingressRules.readReplicasAccessOnlyFrom
+is enabled. NOTE(review): when neither namespaceSelector/podSelector nor
+customRules is set, "ingress:" renders with no rules, which blocks all
+ingress to the selected pods — confirm this is the intended default.
+*/}}
+{{- if and .Values.networkPolicy.enabled (eq .Values.architecture "replication") .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-ingress" (include "postgresql.v1.readReplica.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: read
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }}
+  podSelector:
+    matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
+      app.kubernetes.io/component: read
+  ingress:
+    {{- if and .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector) }}
+    - from:
+        {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector }}
+        - namespaceSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+        {{- end }}
+        {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector }}
+        - podSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+        {{- end }}
+      ports:
+        - port: {{ .Values.containerPorts.postgresql }}
+    {{- end }}
+  {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules }}
+  {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/servicemonitor.yaml b/charts/openproject/charts/postgresql/templates/read/servicemonitor.yaml
new file mode 100644
index 0000000..845734b
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/servicemonitor.yaml
@@ -0,0 +1,51 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- /*
+Prometheus Operator ServiceMonitor scraping the read replicas' metrics
+service on its "http-metrics" port; rendered only when metrics and
+metrics.serviceMonitor are enabled and architecture is "replication".
+*/}}
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled (eq .Values.architecture "replication") }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "postgresql.v1.readReplica.fullname" . }}
+  namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: metrics-read
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.metrics.serviceMonitor.jobLabel }}
+  jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+  {{- end }}
+  selector:
+    {{- $svcLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.selector .Values.commonLabels ) "context" . ) }}
+    matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $svcLabels "context" $ ) | nindent 6 }}
+      app.kubernetes.io/component: metrics-read
+  endpoints:
+    - port: http-metrics
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.relabelings }}
+      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.honorLabels }}
+      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/statefulset.yaml b/charts/openproject/charts/postgresql/templates/read/statefulset.yaml
new file mode 100644
index 0000000..8268700
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/statefulset.yaml
@@ -0,0 +1,552 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if eq .Values.architecture "replication" }}
+{{- $customUser := include "postgresql.v1.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ include "postgresql.v1.readReplica.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.labels .Values.commonLabels ) "context" . ) }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: read
+ {{- if or .Values.commonAnnotations .Values.readReplicas.annotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.readReplicas.replicaCount }}
+ serviceName: {{ include "postgresql.v1.readReplica.svc.headless" . }}
+ {{- if .Values.readReplicas.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }}
+ {{- end }}
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
+ app.kubernetes.io/component: read
+ template:
+ metadata:
+ name: {{ include "postgresql.v1.readReplica.fullname" . }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }}
+ app.kubernetes.io/component: read
+ {{- if or (include "postgresql.v1.readReplicas.createExtendedConfigmap" .) .Values.readReplicas.podAnnotations }}
+ annotations:
+ {{- if (include "postgresql.v1.readReplicas.createExtendedConfigmap" .) }}
+ checksum/extended-configuration: {{ pick (include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | fromYaml) "data" | toYaml | sha256sum }}
+ {{- end }}
+ {{- if .Values.readReplicas.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ spec:
+ {{- if .Values.readReplicas.extraPodSpec }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "postgresql.v1.serviceAccountName" . }}
+ {{- include "postgresql.v1.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.readReplicas.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "customLabels" $podLabels "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "customLabels" $podLabels "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.readReplicas.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.priorityClassName }}
+ priorityClassName: {{ .Values.readReplicas.priorityClassName }}
+ {{- end }}
+ {{- if .Values.readReplicas.schedulerName }}
+ schedulerName: {{ .Values.readReplicas.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.readReplicas.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if .Values.readReplicas.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.readReplicas.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ hostNetwork: {{ .Values.readReplicas.hostNetwork }}
+ hostIPC: {{ .Values.readReplicas.hostIPC }}
+ {{- if or (and .Values.tls.enabled (not .Values.volumePermissions.enabled)) (and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled)) .Values.readReplicas.initContainers }}
+ initContainers:
+ {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+ - name: copy-certs
+ image: {{ include "postgresql.v1.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.readReplicas.resources }}
+ resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+ {{- end }}
+ # We don't require a privileged container in this case
+ {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ chmod 600 {{ include "postgresql.v1.tlsCertKey" . }}
+ volumeMounts:
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }}
+ - name: init-chmod-data
+ image: {{ include "postgresql.v1.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.readReplicas.resources }}
+ resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ {{- if .Values.readReplicas.persistence.enabled }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }}
+ {{- else }}
+ chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }}
+ {{- end }}
+ mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+ chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+ find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.v1.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+ {{- else }}
+ xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ chmod -R 777 /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+ {{- else }}
+ chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+ {{- end }}
+ chmod 600 {{ include "postgresql.v1.tlsCertKey" . }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{ if .Values.readReplicas.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+ {{- if .Values.readReplicas.persistence.subPath }}
+ subPath: {{ .Values.readReplicas.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.initContainers }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.initContainers "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: postgresql
+ image: {{ include "postgresql.v1.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: POSTGRESQL_PORT_NUMBER
+ value: {{ .Values.containerPorts.postgresql | quote }}
+ - name: POSTGRESQL_VOLUME_DIR
+ value: {{ .Values.readReplicas.persistence.mountPath | quote }}
+ {{- if .Values.readReplicas.persistence.mountPath }}
+ - name: PGDATA
+ value: {{ .Values.postgresqlDataDir | quote }}
+ {{- end }}
+ # Authentication
+ {{- if or (eq $customUser "postgres") (empty $customUser) }}
+ {{- if .Values.auth.enablePostgresUser }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.adminPasswordKey" . }}
+ {{- end }}
+ {{- else }}
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "true"
+ {{- end }}
+ {{- else }}
+ - name: POSTGRES_USER
+ value: {{ $customUser | quote }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.userPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.userPasswordKey" . }}
+ {{- end }}
+ {{- if .Values.auth.enablePostgresUser }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.adminPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ # Replication
+ - name: POSTGRES_REPLICATION_MODE
+ value: "slave"
+ - name: POSTGRES_REPLICATION_USER
+ value: {{ .Values.auth.replicationUsername | quote }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_REPLICATION_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.replicationPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.replicationPasswordKey" . }}
+ {{- end }}
+ - name: POSTGRES_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ - name: POSTGRES_MASTER_HOST
+ value: {{ include "postgresql.v1.primary.fullname" . }}
+ - name: POSTGRES_MASTER_PORT_NUMBER
+ value: {{ include "postgresql.v1.service.port" . | quote }}
+ # TLS
+ - name: POSTGRESQL_ENABLE_TLS
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+ value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+ - name: POSTGRESQL_TLS_CERT_FILE
+ value: {{ include "postgresql.v1.tlsCert" . }}
+ - name: POSTGRESQL_TLS_KEY_FILE
+ value: {{ include "postgresql.v1.tlsCertKey" . }}
+ {{- if .Values.tls.certCAFilename }}
+ - name: POSTGRESQL_TLS_CA_FILE
+ value: {{ include "postgresql.v1.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.tls.crlFilename }}
+ - name: POSTGRESQL_TLS_CRL_FILE
+ value: {{ include "postgresql.v1.tlsCRL" . }}
+ {{- end }}
+ {{- end }}
+ # Audit
+ - name: POSTGRESQL_LOG_HOSTNAME
+ value: {{ .Values.audit.logHostname | quote }}
+ - name: POSTGRESQL_LOG_CONNECTIONS
+ value: {{ .Values.audit.logConnections | quote }}
+ - name: POSTGRESQL_LOG_DISCONNECTIONS
+ value: {{ .Values.audit.logDisconnections | quote }}
+ {{- if .Values.audit.logLinePrefix }}
+ - name: POSTGRESQL_LOG_LINE_PREFIX
+ value: {{ .Values.audit.logLinePrefix | quote }}
+ {{- end }}
+ {{- if .Values.audit.logTimezone }}
+ - name: POSTGRESQL_LOG_TIMEZONE
+ value: {{ .Values.audit.logTimezone | quote }}
+ {{- end }}
+ {{- if .Values.audit.pgAuditLog }}
+ - name: POSTGRESQL_PGAUDIT_LOG
+ value: {{ .Values.audit.pgAuditLog | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+ value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+ # Others
+ - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+ value: {{ .Values.audit.clientMinMessages | quote }}
+ - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+ value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+ {{- if .Values.readReplicas.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.readReplicas.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.readReplicas.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.readReplicas.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.readReplicas.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ containerPort: {{ .Values.containerPorts.postgresql }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.readReplicas.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.v1.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser| quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.v1.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+              - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - -e
+ {{- include "postgresql.v1.readinessProbeCommand" . | nindent 16 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.resources }}
+ resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.readReplicas.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.readReplicas.extendedConfiguration }}
+ - name: postgresql-extended-config
+ mountPath: {{ .Values.readReplicas.persistence.mountPath }}/conf/conf.d/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.readReplicas.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+ {{- if .Values.readReplicas.persistence.subPath }}
+ subPath: {{ .Values.readReplicas.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ include "postgresql.v1.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.customMetrics }}
+ args: [ "--extend.query-path", "/conf/custom-metrics.yaml" ]
+ {{- end }}
+ env:
+ {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.v1.database" .) }}
+ - name: DATA_SOURCE_URI
+ value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.v1.service.port" .)) $database }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: DATA_SOURCE_PASS_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.userPasswordKey" .) }}
+ {{- else }}
+ - name: DATA_SOURCE_PASS
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.v1.secretName" . }}
+ key: {{ include "postgresql.v1.userPasswordKey" . }}
+ {{- end }}
+ - name: DATA_SOURCE_USER
+ value: {{ default "postgres" $customUser | quote }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ containerPort: {{ .Values.metrics.containerPorts.metrics }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ mountPath: /conf
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }}
+ {{- end }}
+ volumes:
+ {{- if .Values.readReplicas.extendedConfiguration }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ include "postgresql.v1.readReplicas.extendedConfigmapName" . }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ include "postgresql.v1.secretName" . }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ secret:
+ secretName: {{ include "postgresql.v1.tlsSecretName" . }}
+ - name: postgresql-certificates
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ configMap:
+ name: {{ printf "%s-metrics" (include "postgresql.v1.readReplica.fullname" .) }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ emptyDir:
+ medium: Memory
+ {{- if .Values.shmVolume.sizeLimit }}
+ sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.readReplicas.persistence.enabled .Values.readReplicas.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ tpl .Values.readReplicas.persistence.existingClaim $ }}
+ {{- else if not .Values.readReplicas.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- else }}
+ {{- if .Values.readReplicas.persistentVolumeClaimRetentionPolicy.enabled }}
+ persistentVolumeClaimRetentionPolicy:
+ whenDeleted: {{ .Values.readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted }}
+ whenScaled: {{ .Values.readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled }}
+ {{- end }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- if .Values.readReplicas.persistence.annotations }}
+ annotations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.annotations "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.readReplicas.persistence.labels }}
+ labels: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.labels "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.readReplicas.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if .Values.readReplicas.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.readReplicas.persistence.size | quote }}
+ {{- if .Values.readReplicas.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }}
+ {{- end -}}
+ {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/svc-headless.yaml b/charts/openproject/charts/postgresql/templates/read/svc-headless.yaml
new file mode 100644
index 0000000..249af5f
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/svc-headless.yaml
@@ -0,0 +1,38 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.v1.readReplica.svc.headless" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: read
+ annotations:
+ {{- if or .Values.readReplicas.service.headless.annotations .Values.commonAnnotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.service.headless.annotations .Values.commonAnnotations ) "context" . ) }}
+ {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+ # Use this annotation in addition to the actual publishNotReadyAddresses
+ # field below because the annotation will stop being respected soon but the
+ # field is broken in some versions of Kubernetes:
+ # https://github.com/kubernetes/kubernetes/issues/58662
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ type: ClusterIP
+ clusterIP: None
+ # We want all pods in the StatefulSet to have their addresses published for
+ # the sake of the other Postgresql pods even before they're ready, since they
+ # have to be able to talk to each other in order to become ready.
+ publishNotReadyAddresses: true
+ ports:
+ - name: tcp-postgresql
+ port: {{ include "postgresql.v1.readReplica.service.port" . }}
+ targetPort: tcp-postgresql
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }}
+ selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/read/svc.yaml b/charts/openproject/charts/postgresql/templates/read/svc.yaml
new file mode 100644
index 0000000..d92c523
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/read/svc.yaml
@@ -0,0 +1,53 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.v1.readReplica.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: read
+ {{- if or .Values.commonAnnotations .Values.readReplicas.service.annotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.service.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.readReplicas.service.type }}
+ {{- if or (eq .Values.readReplicas.service.type "LoadBalancer") (eq .Values.readReplicas.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{ .Values.readReplicas.service.loadBalancerSourceRanges }}
+ {{- end }}
+ {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and .Values.readReplicas.service.clusterIP (eq .Values.readReplicas.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.readReplicas.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.readReplicas.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.readReplicas.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.readReplicas.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ port: {{ include "postgresql.v1.readReplica.service.port" . }}
+ targetPort: tcp-postgresql
+ {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }}
+ nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }}
+ {{- else if eq .Values.readReplicas.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.readReplicas.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }}
+ selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+ app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/role.yaml b/charts/openproject/charts/postgresql/templates/role.yaml
new file mode 100644
index 0000000..a05805e
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/role.yaml
@@ -0,0 +1,33 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.rbac.create }}
+kind: Role
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+# yamllint disable rule:indentation
+rules:
+ {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+ {{- if and $pspAvailable .Values.psp.create }}
+ - apiGroups:
+ - 'policy'
+ resources:
+ - 'podsecuritypolicies'
+ verbs:
+ - 'use'
+ resourceNames:
+ - {{ include "common.names.fullname" . }}
+ {{- end }}
+ {{- if .Values.rbac.rules }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }}
+ {{- end }}
+# yamllint enable rule:indentation
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/rolebinding.yaml b/charts/openproject/charts/postgresql/templates/rolebinding.yaml
new file mode 100644
index 0000000..04323a0
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/rolebinding.yaml
@@ -0,0 +1,24 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.rbac.create }}
+kind: RoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ kind: Role
+ name: {{ include "common.names.fullname" . }}
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "postgresql.v1.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/secrets.yaml b/charts/openproject/charts/postgresql/templates/secrets.yaml
new file mode 100644
index 0000000..b4267ab
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/secrets.yaml
@@ -0,0 +1,99 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- $host := include "postgresql.v1.primary.fullname" . }}
+{{- $port := include "postgresql.v1.service.port" . }}
+{{- $customUser := include "postgresql.v1.username" . }}
+{{- $postgresPassword := include "common.secrets.lookup" (dict "secret" (include "postgresql.v1.secretName" .) "key" (coalesce .Values.global.postgresql.auth.secretKeys.adminPasswordKey .Values.auth.secretKeys.adminPasswordKey) "defaultValue" (ternary (coalesce .Values.global.postgresql.auth.password .Values.auth.password .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (or (empty $customUser) (eq $customUser "postgres"))) "context" $) | trimAll "\"" | b64dec }}
+{{- if and (not $postgresPassword) .Values.auth.enablePostgresUser }}
+{{- $postgresPassword = randAlphaNum 10 }}
+{{- end }}
+{{- $replicationPassword := "" }}
+{{- if eq .Values.architecture "replication" }}
+{{- $replicationPassword = include "common.secrets.passwords.manage" (dict "secret" (include "postgresql.v1.secretName" .) "key" (coalesce .Values.global.postgresql.auth.secretKeys.replicationPasswordKey .Values.auth.secretKeys.replicationPasswordKey) "providedValues" (list "auth.replicationPassword") "context" $) | trimAll "\"" | b64dec }}
+{{- end }}
+{{- $ldapPassword := "" }}
+{{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }}
+{{- $ldapPassword = coalesce .Values.ldap.bind_password .Values.ldap.bindpw }}
+{{- end }}
+{{- $password := "" }}
+{{- if and (not (empty $customUser)) (ne $customUser "postgres") }}
+{{- $password = include "common.secrets.passwords.manage" (dict "secret" (include "postgresql.v1.secretName" .) "key" (coalesce .Values.global.postgresql.auth.secretKeys.userPasswordKey .Values.auth.secretKeys.userPasswordKey) "providedValues" (list "global.postgresql.auth.password" "auth.password") "context" $) | trimAll "\"" | b64dec }}
+{{- end }}
+{{- $database := include "postgresql.v1.database" . }}
+{{- if (include "postgresql.v1.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: Opaque
+data:
+ {{- if $postgresPassword }}
+ postgres-password: {{ $postgresPassword | b64enc | quote }}
+ {{- end }}
+ {{- if $password }}
+ password: {{ $password | b64enc | quote }}
+ {{- end }}
+ {{- if $replicationPassword }}
+ replication-password: {{ $replicationPassword | b64enc | quote }}
+ {{- end }}
+ # We don't auto-generate LDAP password when it's not provided as we do for other passwords
+ {{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }}
+ ldap-password: {{ $ldapPassword | b64enc | quote }}
+ {{- end }}
+{{- end }}
+{{- if .Values.serviceBindings.enabled }}
+{{- if $postgresPassword }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}-svcbind-postgres
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: servicebinding.io/postgresql
+data:
+ provider: {{ print "bitnami" | b64enc | quote }}
+ type: {{ print "postgresql" | b64enc | quote }}
+ host: {{ $host | b64enc | quote }}
+ port: {{ $port | b64enc | quote }}
+ username: {{ print "postgres" | b64enc | quote }}
+ database: {{ print "postgres" | b64enc | quote }}
+ password: {{ $postgresPassword | b64enc | quote }}
+ uri: {{ printf "postgresql://postgres:%s@%s:%s/postgres" $postgresPassword $host $port | b64enc | quote }}
+{{- end }}
+{{- if $password }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}-svcbind-custom-user
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: servicebinding.io/postgresql
+data:
+ provider: {{ print "bitnami" | b64enc | quote }}
+ type: {{ print "postgresql" | b64enc | quote }}
+ host: {{ $host | b64enc | quote }}
+ port: {{ $port | b64enc | quote }}
+ username: {{ $customUser | b64enc | quote }}
+ password: {{ $password | b64enc | quote }}
+ {{- if $database }}
+ database: {{ $database | b64enc | quote }}
+ {{- end }}
+ uri: {{ printf "postgresql://%s:%s@%s:%s/%s" $customUser $password $host $port $database | b64enc | quote }}
+{{- end }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/serviceaccount.yaml b/charts/openproject/charts/postgresql/templates/serviceaccount.yaml
new file mode 100644
index 0000000..8886bff
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/serviceaccount.yaml
@@ -0,0 +1,18 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "postgresql.v1.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }}
+ {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/templates/tls-secrets.yaml b/charts/openproject/charts/postgresql/templates/tls-secrets.yaml
new file mode 100644
index 0000000..7e44a43
--- /dev/null
+++ b/charts/openproject/charts/postgresql/templates/tls-secrets.yaml
@@ -0,0 +1,30 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if (include "postgresql.v1.createTlsSecret" . ) }}
+{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }}
+{{- $ca := genCA "postgresql-ca" 365 }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $primaryHeadlessServiceName := include "postgresql.v1.primary.svc.headless" . }}
+{{- $readHeadlessServiceName := include "postgresql.v1.readReplica.svc.headless" . }}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }}
+{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secretName }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+ tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+ ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}
diff --git a/charts/openproject/charts/postgresql/values.schema.json b/charts/openproject/charts/postgresql/values.schema.json
new file mode 100644
index 0000000..fc41483
--- /dev/null
+++ b/charts/openproject/charts/postgresql/values.schema.json
@@ -0,0 +1,156 @@
+{
+ "$schema": "http://json-schema.org/schema#",
+ "type": "object",
+ "properties": {
+ "architecture": {
+ "type": "string",
+ "title": "PostgreSQL architecture",
+ "form": true,
+ "description": "Allowed values: `standalone` or `replication`"
+ },
+ "auth": {
+ "type": "object",
+ "title": "Authentication configuration",
+ "form": true,
+ "properties": {
+ "enablePostgresUser": {
+ "type": "boolean",
+ "title": "Enable \"postgres\" admin user",
+ "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user",
+ "form": true
+ },
+ "postgresPassword": {
+ "type": "string",
+ "title": "Password for the \"postgres\" admin user",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true
+ },
+ "database": {
+ "type": "string",
+ "title": "PostgreSQL custom database",
+ "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL",
+ "form": true
+ },
+ "username": {
+ "type": "string",
+ "title": "PostgreSQL custom user",
+ "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. This user only has permissions on the PostgreSQL custom database",
+ "form": true
+ },
+ "password": {
+ "type": "string",
+ "title": "Password for the custom user to create",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true
+ },
+ "replicationUsername": {
+ "type": "string",
+ "title": "PostgreSQL replication user",
+ "description": "Name of user used to manage replication.",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ },
+ "replicationPassword": {
+ "type": "string",
+ "title": "Password for PostgreSQL replication user",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ }
+ }
+ },
+ "persistence": {
+ "type": "object",
+ "properties": {
+ "size": {
+ "type": "string",
+ "title": "Persistent Volume Size",
+ "form": true,
+ "render": "slider",
+ "sliderMin": 1,
+ "sliderMax": 100,
+ "sliderUnit": "Gi"
+ }
+ }
+ },
+ "resources": {
+ "type": "object",
+ "title": "Required Resources",
+ "description": "Configure resource requests",
+ "form": true,
+ "properties": {
+ "requests": {
+ "type": "object",
+ "properties": {
+ "memory": {
+ "type": "string",
+ "form": true,
+ "render": "slider",
+ "title": "Memory Request",
+ "sliderMin": 10,
+ "sliderMax": 2048,
+ "sliderUnit": "Mi"
+ },
+ "cpu": {
+ "type": "string",
+ "form": true,
+ "render": "slider",
+ "title": "CPU Request",
+ "sliderMin": 10,
+ "sliderMax": 2000,
+ "sliderUnit": "m"
+ }
+ }
+ }
+ }
+ },
+ "replication": {
+ "type": "object",
+ "form": true,
+ "title": "Replication Details",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Enable Replication",
+ "form": true
+ },
+ "readReplicas": {
+ "type": "integer",
+          "title": "Read Replicas",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ }
+ }
+ },
+ "volumePermissions": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "form": true,
+ "title": "Enable Init Containers",
+          "description": "Change the owner of the persistent volume mountpoint to runAsUser:fsGroup"
+ }
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Configure metrics exporter",
+ "form": true
+ }
+ }
+ }
+ }
+}
diff --git a/charts/openproject/charts/postgresql/values.yaml b/charts/openproject/charts/postgresql/values.yaml
new file mode 100644
index 0000000..d0c6279
--- /dev/null
+++ b/charts/openproject/charts/postgresql/values.yaml
@@ -0,0 +1,1601 @@
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+## @section Global parameters
+## Please, note that this will override the parameters, including dependencies, configured to use the global value
+##
+global:
+ ## @param global.imageRegistry Global Docker image registry
+ ##
+ imageRegistry: ""
+ ## @param global.imagePullSecrets Global Docker registry secret names as an array
+ ## e.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+ ## @param global.storageClass Global StorageClass for Persistent Volume(s)
+ ##
+ storageClass: ""
+ postgresql:
+ ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`)
+ ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
+ ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`)
+ ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
+ ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`).
+ ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ##
+ auth:
+ postgresPassword: ""
+ username: ""
+ password: ""
+ database: ""
+ existingSecret: ""
+ secretKeys:
+ adminPasswordKey: ""
+ userPasswordKey: ""
+ replicationPasswordKey: ""
+ ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
+ ##
+ service:
+ ports:
+ postgresql: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+ ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+ ##
+ enabled: false
+ ## @param diagnosticMode.command Command to override all containers in the statefulset
+ ##
+ command:
+ - sleep
+ ## @param diagnosticMode.args Args to override all containers in the statefulset
+ ##
+ args:
+ - infinity
+
+## @section PostgreSQL common parameters
+##
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+## @param image.registry PostgreSQL image registry
+## @param image.repository PostgreSQL image repository
+## @param image.tag PostgreSQL image tag (immutable tags are recommended)
+## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy PostgreSQL image pull policy
+## @param image.pullSecrets Specify image pull secrets
+## @param image.debug Specify if debug values should be set
+##
+image:
+ registry: docker.io
+ repository: bitnami/postgresql
+ tag: 15.4.0-debian-11-r45
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Set to true if you would like to see extra information on logs
+ ##
+ debug: false
+## Authentication parameters
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run
+##
+auth:
+ ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
+ ##
+ enablePostgresUser: true
+ ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided
+ ##
+ postgresPassword: ""
+ ## @param auth.username Name for a custom user to create
+ ##
+ username: ""
+ ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided
+ ##
+ password: ""
+ ## @param auth.database Name for a custom database to create
+ ##
+ database: ""
+ ## @param auth.replicationUsername Name of the replication user
+ ##
+ replicationUsername: repl_user
+ ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided
+ ##
+ replicationPassword: ""
+ ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case.
+ ##
+ existingSecret: ""
+ ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ##
+ secretKeys:
+ adminPasswordKey: postgres-password
+ userPasswordKey: password
+ replicationPasswordKey: replication-password
+ ## @param auth.usePasswordFiles Mount credentials as files instead of using environment variables
+ ##
+ usePasswordFiles: false
+## @param architecture PostgreSQL architecture (`standalone` or `replication`)
+##
+architecture: standalone
+## Replication configuration
+## Ignored if `architecture` is `standalone`
+##
+replication:
+ ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off`
+ ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`.
+ ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+ ##
+ synchronousCommit: "off"
+ numSynchronousReplicas: 0
+ ## @param replication.applicationName Cluster application name. Useful for advanced replication settings
+ ##
+ applicationName: my_application
+## @param containerPorts.postgresql PostgreSQL container port
+##
+containerPorts:
+ postgresql: 5432
+## Audit settings
+## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing
+## @param audit.logHostname Log client hostnames
+## @param audit.logConnections Add client log-in operations to the log file
+## @param audit.logDisconnections Add client log-outs operations to the log file
+## @param audit.pgAuditLog Add operations to log using the pgAudit extension
+## @param audit.pgAuditLogCatalog Log catalog using pgAudit
+## @param audit.clientMinMessages Message log level to share with the user
+## @param audit.logLinePrefix Template for log line prefix (default if not set)
+## @param audit.logTimezone Timezone for the log timestamps
+##
+audit:
+ logHostname: false
+ logConnections: false
+ logDisconnections: false
+ pgAuditLog: ""
+ pgAuditLogCatalog: "off"
+ clientMinMessages: error
+ logLinePrefix: ""
+ logTimezone: ""
+## LDAP configuration
+## @param ldap.enabled Enable LDAP support
+ ## DEPRECATED ldap.url It will be removed in a future release, please use 'ldap.uri' instead
+## @param ldap.server IP address or name of the LDAP server.
+## @param ldap.port Port number on the LDAP server to connect to
+## @param ldap.prefix String to prepend to the user name when forming the DN to bind
+## @param ldap.suffix String to append to the user name when forming the DN to bind
+ ## DEPRECATED ldap.baseDN It will be removed in a future release, please use 'ldap.basedn' instead
+ ## DEPRECATED ldap.bindDN It will be removed in a future release, please use 'ldap.binddn' instead
+ ## DEPRECATED ldap.bind_password It will be removed in a future release, please use 'ldap.bindpw' instead
+## @param ldap.basedn Root DN to begin the search for the user in
+## @param ldap.binddn DN of user to bind to LDAP
+## @param ldap.bindpw Password for the user to bind to LDAP
+ ## DEPRECATED ldap.search_attr It will be removed in a future release, please use 'ldap.searchAttribute' instead
+ ## DEPRECATED ldap.search_filter It will be removed in a future release, please use 'ldap.searchFilter' instead
+## @param ldap.searchAttribute Attribute to match against the user name in the search
+## @param ldap.searchFilter The search filter to use when doing search+bind authentication
+## @param ldap.scheme Set to `ldaps` to use LDAPS
+ ## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead
+ ## @param ldap.tls.enabled Set to true to enable TLS encryption
+##
+ldap:
+ enabled: false
+ server: ""
+ port: ""
+ prefix: ""
+ suffix: ""
+ basedn: ""
+ binddn: ""
+ bindpw: ""
+ searchAttribute: ""
+ searchFilter: ""
+ scheme: ""
+ tls:
+ enabled: false
+ ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored.
+ ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html
+ ##
+ uri: ""
+## @param postgresqlDataDir PostgreSQL data dir folder
+##
+postgresqlDataDir: /bitnami/postgresql/data
+## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list)
+##
+postgresqlSharedPreloadLibraries: "pgaudit"
+## Start PostgreSQL pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M`
+## ref: https://github.com/docker-library/postgres/issues/416
+## ref: https://github.com/containerd/containerd/issues/3654
+##
+shmVolume:
+ ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s)
+ ##
+ enabled: true
+ ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs
+ ## Note: the size of the tmpfs counts against container's memory limit
+ ## e.g:
+ ## sizeLimit: 1Gi
+ ##
+ sizeLimit: ""
+## TLS configuration
+##
+tls:
+ ## @param tls.enabled Enable TLS traffic support
+ ##
+ enabled: false
+ ## @param tls.autoGenerated Generate automatically self-signed TLS certificates
+ ##
+ autoGenerated: false
+ ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's
+ ##
+ preferServerCiphers: true
+ ## @param tls.certificatesSecret Name of an existing secret that contains the certificates
+ ##
+ certificatesSecret: ""
+ ## @param tls.certFilename Certificate filename
+ ##
+ certFilename: ""
+ ## @param tls.certKeyFilename Certificate key filename
+ ##
+ certKeyFilename: ""
+ ## @param tls.certCAFilename CA Certificate filename
+ ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate
+ ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html
+ ##
+ certCAFilename: ""
+ ## @param tls.crlFilename File containing a Certificate Revocation List
+ ##
+ crlFilename: ""
+
+## @section PostgreSQL Primary parameters
+##
+primary:
+ ## @param primary.name Name of the primary database (eg primary, master, leader, ...)
+ ##
+ name: primary
+ ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap
+ ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+ ##
+ configuration: ""
+ ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration
+ ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
+ ## e.g:
+ ## pgHbaConfiguration: |-
+ ## local all all trust
+ ## host all all localhost trust
+ ## host mydatabase mysuser 192.168.0.0/24 md5
+ ##
+ pgHbaConfiguration: ""
+ ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration
+ ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored
+ ##
+ existingConfigmap: ""
+ ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+ ##
+ extendedConfiguration: ""
+ ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration
+ ## NOTE: `primary.extendedConfiguration` will be ignored
+ ##
+ existingExtendedConfigmap: ""
+ ## Initdb configuration
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments
+ ##
+ initdb:
+ ## @param primary.initdb.args PostgreSQL initdb extra arguments
+ ##
+ args: ""
+ ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log
+ ##
+ postgresqlWalDir: ""
+ ## @param primary.initdb.scripts Dictionary of initdb scripts
+ ## Specify dictionary of scripts to be run at first boot
+ ## e.g:
+ ## scripts:
+ ## my_init_script.sh: |
+ ## #!/bin/sh
+ ## echo "Do something."
+ ##
+ scripts: {}
+ ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot
+ ## NOTE: This will override `primary.initdb.scripts`
+ ##
+ scriptsConfigMap: ""
+ ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information)
+ ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap`
+ ##
+ scriptsSecret: ""
+ ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts
+ ##
+ user: ""
+ ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts
+ ##
+ password: ""
+ ## Configure current cluster's primary server to be the standby server in other cluster.
+ ## This will allow cross cluster replication and provide cross cluster high availability.
+ ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled.
+ ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not
+ ## @param primary.standby.primaryHost The Host of replication primary in the other cluster
+ ## @param primary.standby.primaryPort The Port of replication primary in the other cluster
+ ##
+ standby:
+ enabled: false
+ primaryHost: ""
+ primaryPort: ""
+ ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param primary.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param primary.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers
+ ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers
+ ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers
+ ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param primary.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param primary.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## PostgreSQL Primary resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers
+ ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers
+ ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers
+ ##
+ resources:
+ limits: {}
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ ## Pod Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param primary.podSecurityContext.enabled Enable security context
+ ## @param primary.podSecurityContext.fsGroup Group ID for the pod
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param primary.containerSecurityContext.enabled Enable container security context
+ ## @param primary.containerSecurityContext.runAsUser User ID for the container
+ ## @param primary.containerSecurityContext.runAsGroup Group ID for the container
+ ## @param primary.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container
+ ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container
+ ## @param primary.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container
+ ## @param primary.containerSecurityContext.capabilities.drop Set capabilities.drop for the container
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsGroup: 0
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+ ## @param primary.hostAliases PostgreSQL primary pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary)
+ ##
+ hostNetwork: false
+ ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary)
+ ##
+ hostIPC: false
+ ## @param primary.labels Map of labels to add to the statefulset (postgresql primary)
+ ##
+ labels: {}
+ ## @param primary.annotations Annotations for PostgreSQL primary pods
+ ##
+ annotations: {}
+ ## @param primary.podLabels Map of labels to add to the pods (postgresql primary)
+ ##
+ podLabels: {}
+ ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary)
+ ##
+ podAnnotations: {}
+ ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## PostgreSQL Primary node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param primary.affinity Affinity for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary)
+ ##
+ priorityClassName: ""
+ ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type
+ ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate: {}
+ ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s)
+ ##
+ extraVolumes: []
+ ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s)
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s)
+ ## Example
+ ##
+ ## initContainers:
+ ## - name: do-something
+ ## image: busybox
+ ## command: ['do', 'something']
+ ##
+ initContainers: []
+ ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s)
+ ##
+ extraPodSpec: {}
+ ## PostgreSQL Primary service configuration
+ ##
+ service:
+ ## @param primary.service.type Kubernetes Service type
+ ##
+ type: ClusterIP
+ ## @param primary.service.ports.postgresql PostgreSQL service port
+ ##
+ ports:
+ postgresql: 5432
+ ## Node ports to expose
+ ## NOTE: choose port between <30000-32767>
+ ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ nodePorts:
+ postgresql: ""
+ ## @param primary.service.clusterIP Static clusterIP or None for headless services
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param primary.service.annotations Annotations for PostgreSQL primary service
+ ##
+ annotations: {}
+ ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param primary.service.externalTrafficPolicy Enable client source IP preservation
+ ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service
+ ##
+ extraPorts: []
+ ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## Headless service properties
+ ##
+ headless:
+ ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service
+ ##
+ annotations: {}
+ ## PostgreSQL Primary persistence configuration
+ ##
+ persistence:
+ ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC
+ ##
+ enabled: true
+ ## @param primary.persistence.existingClaim Name of an existing PVC to use
+ ##
+ existingClaim: ""
+ ## @param primary.persistence.mountPath The path the volume will be mounted at
+ ## Note: useful when using custom PostgreSQL images
+ ##
+ mountPath: /bitnami/postgresql
+ ## @param primary.persistence.subPath The subdirectory of the volume to mount to
+ ## Useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
+ ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume
+ ##
+ size: 8Gi
+ ## @param primary.persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param primary.persistence.labels Labels for the PVC
+ ##
+ labels: {}
+ ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param primary.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+ ## PostgreSQL Primary Persistent Volume Claim Retention Policy
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
+ ##
+ persistentVolumeClaimRetentionPolicy:
+ ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary Statefulset
+ ##
+ enabled: false
+ ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
+ ##
+ whenScaled: Retain
+ ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
+ ##
+ whenDeleted: Retain
+
+## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
+##
+readReplicas:
+ ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...)
+ ##
+ name: read
+ ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas
+ ##
+ replicaCount: 1
+ ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration)
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+ ##
+ extendedConfiguration: ""
+ ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param readReplicas.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param readReplicas.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers
+ ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers
+ ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers
+ ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## PostgreSQL read only resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers
+ ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers
+ ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers
+ ##
+ resources:
+ limits: {}
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ ## Pod Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param readReplicas.podSecurityContext.enabled Enable security context
+ ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param readReplicas.containerSecurityContext.enabled Enable container security context
+ ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container
+ ## @param readReplicas.containerSecurityContext.runAsGroup Group ID for the container
+ ## @param readReplicas.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container
+ ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container
+ ## @param readReplicas.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container
+ ## @param readReplicas.containerSecurityContext.capabilities.drop Set capabilities.drop for the container
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsGroup: 0
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+ ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only)
+ ##
+ hostNetwork: false
+ ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only)
+ ##
+ hostIPC: false
+ ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only)
+ ##
+ labels: {}
+ ## @param readReplicas.annotations Annotations for PostgreSQL read only pods
+ ##
+ annotations: {}
+ ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only)
+ ##
+ podLabels: {}
+ ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only)
+ ##
+ podAnnotations: {}
+ ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## PostgreSQL read only node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only)
+ ##
+ priorityClassName: ""
+ ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type
+ ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate: {}
+ ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s)
+ ##
+ extraVolumes: []
+ ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s)
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s)
+ ## Example
+ ##
+ ## initContainers:
+ ## - name: do-something
+ ## image: busybox
+ ## command: ['do', 'something']
+ ##
+ initContainers: []
+ ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s)
+ ##
+ extraPodSpec: {}
+ ## PostgreSQL read only service configuration
+ ##
+ service:
+ ## @param readReplicas.service.type Kubernetes Service type
+ ##
+ type: ClusterIP
+ ## @param readReplicas.service.ports.postgresql PostgreSQL service port
+ ##
+ ports:
+ postgresql: 5432
+ ## Node ports to expose
+ ## NOTE: choose port between <30000-32767>
+ ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ nodePorts:
+ postgresql: ""
+ ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service
+ ##
+ annotations: {}
+ ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation
+ ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service
+ ##
+ extraPorts: []
+ ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## Headless service properties
+ ##
+ headless:
+ ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service
+ ##
+ annotations: {}
+ ## PostgreSQL read only persistence configuration
+ ##
+ persistence:
+ ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC
+ ##
+ enabled: true
+ ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use
+ ##
+ existingClaim: ""
+ ## @param readReplicas.persistence.mountPath The path the volume will be mounted at
+ ## Note: useful when using custom PostgreSQL images
+ ##
+ mountPath: /bitnami/postgresql
+ ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to
+ ## Useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
+ ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume
+ ##
+ size: 8Gi
+ ## @param readReplicas.persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param readReplicas.persistence.labels Labels for the PVC
+ ##
+ labels: {}
+ ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param readReplicas.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+ ## PostgreSQL Read only Persistent Volume Claim Retention Policy
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
+ ##
+ persistentVolumeClaimRetentionPolicy:
+ ## @param readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset
+ ##
+ enabled: false
+ ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
+ ##
+ whenScaled: Retain
+ ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
+ ##
+ whenDeleted: Retain
+
+
+## @section Backup parameters
+## This section implements a trivial logical dump cronjob of the database.
+## This only comes with the consistency guarantees of the dump program.
+## This is not a snapshot based roll forward/backward recovery backup.
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/
+backup:
+ ## @param backup.enabled Enable the logical dump of the database "regularly"
+ enabled: false
+ cronjob:
+ ## @param backup.cronjob.schedule Set the cronjob parameter schedule
+ schedule: "@daily"
+ ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy
+ concurrencyPolicy: Allow
+ ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit
+ failedJobsHistoryLimit: 1
+ ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit
+ successfulJobsHistoryLimit: 3
+ ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds
+ startingDeadlineSeconds: ""
+ ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished
+ ttlSecondsAfterFinished: ""
+ ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy
+ restartPolicy: OnFailure
+ ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup
+ ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## backup container's Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param backup.cronjob.containerSecurityContext.runAsUser User ID for the backup container
+ ## @param backup.cronjob.containerSecurityContext.runAsGroup Group ID for the backup container
+ ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set backup container's Security Context runAsNonRoot
+ ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Is the container itself readonly
+ ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate backup pod(s) privileges
+ ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set backup container's Security Context seccompProfile type
+ ## @param backup.cronjob.containerSecurityContext.capabilities.drop Set backup container's Security Context capabilities to drop
+ containerSecurityContext:
+ runAsUser: 1001
+ runAsGroup: 0
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+ ## @param backup.cronjob.command Set backup container's command to run
+ command:
+ - /bin/sh
+ - -c
+ - "pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump"
+
+ ## @param backup.cronjob.labels Set the cronjob labels
+ labels: {}
+ ## @param backup.cronjob.annotations Set the cronjob annotations
+ annotations: {}
+ ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ storage:
+ ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`)
+ ## If defined, PVC must be created manually before volume will be bound
+ ##
+ existingClaim: ""
+ ## @param backup.cronjob.storage.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted
+ ##
+ resourcePolicy: ""
+ ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner.
+ ##
+ storageClass: ""
+ ## @param backup.cronjob.storage.accessModes PV Access Mode
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume
+ ##
+ size: 8Gi
+ ## @param backup.cronjob.storage.annotations PVC annotations
+ ##
+ annotations: {}
+ ## @param backup.cronjob.storage.mountPath Path to mount the volume at
+ ##
+ mountPath: /backup/pgdump
+ ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at
+ ## and one PV for multiple services.
+ ##
+ subPath: ""
+ ## Fine tuning for volumeClaimTemplates
+ ##
+ volumeClaimTemplates:
+ ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes)
+ ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details
+ ##
+ selector: {}
+
+## @section NetworkPolicy parameters
+##
+
+## Add networkpolicies
+##
+networkPolicy:
+ ## @param networkPolicy.enabled Enable network policies
+ ##
+ enabled: false
+ ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus)
+ ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace.
+ ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods.
+ ##
+ metrics:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: monitoring
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: monitoring
+ ##
+ podSelector: {}
+ ## Ingress Rules
+ ##
+ ingressRules:
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin.
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s).
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s).
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules Custom network policy for the PostgreSQL primary node.
+ ##
+ primaryAccessOnlyFrom:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: ingress
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: access
+ ##
+ podSelector: {}
+ ## custom ingress rules
+ ## e.g:
+ ## customRules:
+ ## - from:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ ##
+ customRules: []
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin.
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s).
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s).
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules Custom network policy for the PostgreSQL read-only nodes.
+ ##
+ readReplicasAccessOnlyFrom:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: ingress
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: access
+ ##
+ podSelector: {}
+ ## custom ingress rules
+ ## e.g:
+ ## customRules:
+ ## - from:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ ##
+ customRules: []
+ ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53).
+ ## @param networkPolicy.egressRules.customRules Custom network policy rule
+ ##
+ egressRules:
+ # Deny connections to external. This is not compatible with an external database.
+ denyConnectionsToExternal: false
+ ## Additional custom egress rules
+ ## e.g:
+ ## customRules:
+ ## - to:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ ##
+ customRules: []
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+ ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+ ##
+ enabled: false
+ ## @param volumePermissions.image.registry Init container volume-permissions image registry
+ ## @param volumePermissions.image.repository Init container volume-permissions image repository
+ ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+ ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+ ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/os-shell
+ tag: 11-debian-11-r77
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Init container resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+ ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Init container' Security Context
+ ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+ ## and not the below volumePermissions.containerSecurityContext.runAsUser
+ ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+ ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container
+ ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container
+ ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container
+ ##
+ containerSecurityContext:
+ runAsUser: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ seccompProfile:
+ type: RuntimeDefault
+## @section Other Parameters
+##
+
+## @param serviceBindings.enabled Create secret for service binding (Experimental)
+## Ref: https://servicebinding.io/service-provider/
+##
+serviceBindings:
+ enabled: false
+
+## Service account for PostgreSQL to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+ ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod
+ ##
+ create: false
+ ## @param serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+ ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+ ##
+ automountServiceAccountToken: true
+ ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+ ##
+ annotations: {}
+## Creates role for ServiceAccount
+## @param rbac.create Create Role and RoleBinding (required for PSP to work)
+##
+rbac:
+ create: false
+ ## @param rbac.rules Custom RBAC rules to set
+ ## e.g:
+ ## rules:
+ ## - apiGroups:
+ ## - ""
+ ## resources:
+ ## - pods
+ ## verbs:
+ ## - get
+ ## - list
+ ##
+ rules: []
+## Pod Security Policy
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
+##
+psp:
+ create: false
+
+## @section Metrics Parameters
+##
+
+metrics:
+ ## @param metrics.enabled Start a prometheus exporter
+ ##
+ enabled: false
+ ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry
+ ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository
+ ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended)
+ ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy
+ ## @param metrics.image.pullSecrets Specify image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/postgres-exporter
+ tag: 0.14.0-debian-11-r2
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## @param metrics.customMetrics Define additional custom metrics
+ ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
+ ## customMetrics:
+ ## pg_database:
+ ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
+ ## metrics:
+ ## - name:
+ ## usage: "LABEL"
+ ## description: "Name of the database"
+ ## - size_bytes:
+ ## usage: "GAUGE"
+ ## description: "Size of the database in bytes"
+ ##
+ customMetrics: {}
+ ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter
+ ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables
+ ## For example:
+ ## extraEnvVars:
+ ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS
+ ## value: "true"
+ ##
+ extraEnvVars: []
+ ## PostgreSQL Prometheus exporter containers' Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context
+ ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser
+ ## @param metrics.containerSecurityContext.runAsGroup Set PostgreSQL Prometheus exporter containers' Security Context runAsGroup
+ ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot
+ ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set PostgreSQL Prometheus exporter containers' Security Context allowPrivilegeEscalation
+ ## @param metrics.containerSecurityContext.seccompProfile.type Set PostgreSQL Prometheus exporter containers' Security Context seccompProfile.type
+ ## @param metrics.containerSecurityContext.capabilities.drop Set PostgreSQL Prometheus exporter containers' Security Context capabilities.drop
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsGroup: 0
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+ ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port
+ ##
+ containerPorts:
+ metrics: 9187
+ ## PostgreSQL Prometheus exporter resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container
+ ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Service configuration
+ ##
+ service:
+ ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port
+ ##
+ ports:
+ metrics: 9187
+ ## @param metrics.service.clusterIP Static clusterIP or None for headless services
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
+ ##
+ clusterIP: ""
+ ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/user-guide/services/
+ ##
+ sessionAffinity: None
+ ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
+ ##
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}"
+ ## Prometheus Operator ServiceMonitor configuration
+ ##
+ serviceMonitor:
+ ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+ ##
+ enabled: false
+ ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+ ##
+ namespace: ""
+ ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ interval: ""
+ ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ scrapeTimeout: ""
+ ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+ ##
+ labels: {}
+ ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+ ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ selector: {}
+ ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+ ##
+ relabelings: []
+ ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+ ##
+ metricRelabelings: []
+ ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
+ ##
+ jobLabel: ""
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ ##
+ prometheusRule:
+ ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+ ##
+ enabled: false
+ ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+ ##
+ namespace: ""
+ ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+ ##
+ labels: {}
+ ## @param metrics.prometheusRule.rules PrometheusRule definitions
+ ## Make sure to constrain the rules to the current postgresql service.
+ ## rules:
+ ## - alert: HugeReplicationLag
+ ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1
+ ## for: 1m
+ ## labels:
+ ## severity: critical
+ ## annotations:
+ ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+ ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+ ##
+ rules: []
diff --git a/charts/openproject/templates/NOTES.txt b/charts/openproject/templates/NOTES.txt
new file mode 100644
index 0000000..bae4286
--- /dev/null
+++ b/charts/openproject/templates/NOTES.txt
@@ -0,0 +1,11 @@
+Thank you for installing OpenProject 🎉
+
+{{- if .Values.ingress.enabled }}
+You can access it via http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.host }}{{ .Values.ingress.path }}
+{{- end }}
+
+Summary:
+--------
+OpenProject: {{ .Values.image.tag }}
+PostgreSQL: {{ if .Values.postgresql.bundled }}{{ .Values.postgresql.image.tag }}{{ else }}external{{ end }}
+Memcached: {{ if .Values.memcached.bundled }}{{ .Values.memcached.image.tag }}{{ else }}external{{ end }}
diff --git a/charts/openproject/templates/_helpers.tpl b/charts/openproject/templates/_helpers.tpl
new file mode 100644
index 0000000..0a6e284
--- /dev/null
+++ b/charts/openproject/templates/_helpers.tpl
@@ -0,0 +1,152 @@
+{{/*
+Returns the OpenProject image to be used including the respective registry and image tag.
+*/}}
+{{- define "openproject.image" -}}
+{{ .Values.image.registry }}/{{ .Values.image.repository }}{{ if .Values.image.sha256 }}@sha256:{{ .Values.image.sha256 }}{{ else }}:{{ .Values.image.tag }}{{ end }}
+{{- end -}}
+
+{{/*
+Returns the OpenProject image pull secrets, if any are defined
+*/}}
+{{- define "openproject.imagePullSecrets" -}}
+{{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }}
+imagePullSecrets:
+ {{- range (coalesce .Values.imagePullSecrets .Values.global.imagePullSecrets) }}
+ - name: "{{ . }}"
+ {{- end }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Yields the configured container security context if enabled.
+
+Allows writing to the container file system in development mode
+This way the OpenProject container works without mounted tmp volumes
+which may not work correctly in local development clusters.
+*/}}
+{{- define "openproject.containerSecurityContext" }}
+{{- if .Values.containerSecurityContext.enabled }}
+securityContext:
+ {{-
+ mergeOverwrite
+ (omit .Values.containerSecurityContext "enabled" | deepCopy)
+ (dict "readOnlyRootFilesystem" (and
+ (not .Values.develop)
+ (get .Values.containerSecurityContext "readOnlyRootFilesystem")
+ ))
+ | toYaml
+ | nindent 2
+ }}
+{{- end }}
+{{- end }}
+
+{{/* Yields the configured pod security context if enabled. */}}
+{{- define "openproject.podSecurityContext" }}
+{{- if .Values.podSecurityContext.enabled }}
+securityContext:
+ {{ omit .Values.podSecurityContext "enabled" | toYaml | nindent 2 | trim }}
+{{- end }}
+{{- end }}
+
+
+{{- define "openproject.useTmpVolumes" -}}
+{{- if ne .Values.openproject.useTmpVolumes nil -}}
+ {{- .Values.openproject.useTmpVolumes -}}
+{{- else -}}
+ {{- (not .Values.develop) -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "openproject.tmpVolumeMounts" -}}
+{{- if eq (include "openproject.useTmpVolumes" .) "true" }}
+- mountPath: /tmp
+ name: tmp
+- mountPath: /app/tmp
+ name: app-tmp
+{{- end }}
+{{- end -}}
+
+{{- define "openproject.tmpVolumeSpec" -}}
+{{- if eq (include "openproject.useTmpVolumes" .) "true" }}
+- name: tmp
+ # we can't use emptyDir due to the sticky bit issue
+ # see: https://github.com/kubernetes/kubernetes/issues/110835
+ ephemeral:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: {{ .Values.openproject.tmpVolumesStorage }}
+- name: app-tmp
+ # we can't use emptyDir due to the sticky bit / world writable issue
+ # see: https://github.com/kubernetes/kubernetes/issues/110835
+ ephemeral:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: {{ .Values.openproject.tmpVolumesStorage }}
+{{- end }}
+{{- end -}}
+
+{{- define "openproject.envFrom" -}}
+- secretRef:
+ name: {{ include "common.names.fullname" . }}-core
+{{- if .Values.openproject.oidc.enabled }}
+- secretRef:
+ name: {{ include "common.names.fullname" . }}-oidc
+{{- end }}
+{{- if .Values.s3.enabled }}
+- secretRef:
+ name: {{ include "common.names.fullname" . }}-s3
+{{- end }}
+{{- if eq .Values.openproject.cache.store "memcache" }}
+- secretRef:
+ name: {{ include "common.names.fullname" . }}-memcached
+{{- end }}
+{{- if .Values.environment }}
+- secretRef:
+ name: {{ include "common.names.fullname" . }}-environment
+{{- end }}
+{{- if .Values.openproject.extraEnvVarsSecret }}
+- secretRef:
+ name: {{ .Values.openproject.extraEnvVarsSecret }}
+{{- end }}
+{{- if .Values.openproject.oidc.extraOidcSealedSecret }}
+- secretRef:
+ name: {{ .Values.openproject.oidc.extraOidcSealedSecret }}
+{{- end }}
+{{- end }}
+
+{{- define "openproject.env" -}}
+{{- if .Values.egress.tls.rootCA.fileName }}
+- name: SSL_CERT_FILE
+ value: "/etc/ssl/certs/custom-ca.pem"
+{{- end }}
+{{- if .Values.postgresql.auth.existingSecret }}
+- name: OPENPROJECT_DB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.postgresql.auth.existingSecret }}
+ key: {{ .Values.postgresql.auth.secretKeys.userPasswordKey }}
+{{- else if .Values.postgresql.auth.password }}
+- name: OPENPROJECT_DB_PASSWORD
+ value: {{ .Values.postgresql.auth.password }}
+{{- else }}
+- name: OPENPROJECT_DB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "common.names.dependency.fullname" (dict "chartName" "postgresql" "chartValues" .Values.postgresql "context" $) }}
+ key: {{ .Values.postgresql.auth.secretKeys.userPasswordKey }}
+{{- end }}
+{{- end }}
+
+{{- define "openproject.envChecksums" }}
+# annotate pods with env value checksums so changes trigger re-deployments
+{{/* A single combined checksum would be preferable, but Helm templates make mapping/reducing over a range awkward, so one checksum is emitted per secret. */}}
+{{- range $suffix := list "core" "memcached" "oidc" "s3" "environment" }}
+checksum/env-{{ $suffix }}: {{ include (print $.Template.BasePath "/secret_" $suffix ".yaml") $ | sha256sum }}
+{{- end }}
+{{- end }}
diff --git a/charts/openproject/templates/ingress.yaml b/charts/openproject/templates/ingress.yaml
new file mode 100644
index 0000000..6695da0
--- /dev/null
+++ b/charts/openproject/templates/ingress.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.ingress.enabled -}}
+---
+apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.ingress.ingressClassName }}
+ ingressClassName: {{ .Values.ingress.ingressClassName }}
+ {{- end }}
+ {{- if .Values.ingress.tls.enabled }}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.host | quote }}
+ secretName: "{{ .Values.ingress.tls.secretName }}"
+ {{- end }}
+ rules:
+ - host: {{ .Values.ingress.host | quote }}
+ http:
+ paths:
+ - path: {{ .Values.ingress.path }}
+ {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
+ pathType: {{ .Values.ingress.pathType }}
+ {{- end }}
+ backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }}
+...
+{{- end }}
diff --git a/charts/openproject/templates/persistentvolumeclaim.yaml b/charts/openproject/templates/persistentvolumeclaim.yaml
new file mode 100644
index 0000000..8350d20
--- /dev/null
+++ b/charts/openproject/templates/persistentvolumeclaim.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.persistence.enabled }}
+{{- if not .Values.persistence.existingClaim }}
+---
+apiVersion: "v1"
+kind: "PersistentVolumeClaim"
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+ {{- with .Values.persistence.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ accessModes: {{ .Values.persistence.accessModes }}
+ {{- if .Values.persistence.storageClassName }}
+ storageClassName: {{ .Values.persistence.storageClassName }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+...
+{{- end }}
+{{- end }}
diff --git a/charts/openproject/templates/secret_core.yaml b/charts/openproject/templates/secret_core.yaml
new file mode 100644
index 0000000..d9eb32a
--- /dev/null
+++ b/charts/openproject/templates/secret_core.yaml
@@ -0,0 +1,31 @@
+---
+apiVersion: "v1"
+kind: "Secret"
+metadata:
+ name: "{{ include "common.names.fullname" . }}-core"
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+stringData:
+ {{- if .Values.postgresql.bundled }}
+ DATABASE_HOST: {{ printf "%s-postgresql.%s.svc.%s" .Release.Name .Release.Namespace .Values.clusterDomain | quote }}
+ DATABASE_PORT: "{{ .Values.postgresql.primary.service.ports.postgresql }}"
+ DATABASE_URL: "postgresql://{{ .Values.postgresql.auth.username }}@{{ include "common.names.dependency.fullname" (dict "chartName" "postgresql" "chartValues" .Values.postgresql "context" $) }}:{{ .Values.postgresql.primary.service.ports.postgresql }}/{{ .Values.postgresql.auth.database }}"
+ {{- else }}
+ DATABASE_HOST: "{{ .Values.postgresql.connection.host }}"
+ DATABASE_PORT: "{{ .Values.postgresql.connection.port }}"
+ DATABASE_URL: "postgresql://{{ .Values.postgresql.auth.username }}@{{ .Values.postgresql.connection.host }}:{{ .Values.postgresql.connection.port }}/{{ .Values.postgresql.auth.database }}"
+ {{- end }}
+ OPENPROJECT_SEED_ADMIN_USER_PASSWORD: {{ .Values.openproject.admin_user.password | quote }}
+ OPENPROJECT_SEED_ADMIN_USER_PASSWORD_RESET: {{ .Values.openproject.admin_user.password_reset | quote }}
+ OPENPROJECT_SEED_ADMIN_USER_NAME: {{ .Values.openproject.admin_user.name | quote }}
+ OPENPROJECT_SEED_ADMIN_USER_MAIL: {{ .Values.openproject.admin_user.mail | quote }}
+ OPENPROJECT_HTTPS: {{ (.Values.develop | ternary "false" .Values.openproject.https) | quote }}
+ OPENPROJECT_SEED_LOCALE: {{ .Values.openproject.seed_locale | quote }}
+ {{- if .Values.ingress.enabled }}
+ OPENPROJECT_HOST__NAME: {{ .Values.openproject.host | default .Values.ingress.host | quote }}
+ {{- end }}
+ OPENPROJECT_HSTS: {{ .Values.openproject.hsts | quote }}
+ OPENPROJECT_RAILS__CACHE__STORE: {{ .Values.openproject.cache.store | quote }}
+ OPENPROJECT_RAILS__RELATIVE__URL__ROOT: {{ .Values.openproject.railsRelativeUrlRoot | default "" | quote }}
+ POSTGRES_STATEMENT_TIMEOUT: {{ .Values.openproject.postgresStatementTimeout | quote }}
+...
diff --git a/charts/openproject/templates/secret_environment.yaml b/charts/openproject/templates/secret_environment.yaml
new file mode 100644
index 0000000..ab08a67
--- /dev/null
+++ b/charts/openproject/templates/secret_environment.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.environment }}
+---
+apiVersion: "v1"
+kind: "Secret"
+metadata:
+ name: "{{ include "common.names.fullname" . }}-environment"
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+stringData:
+ # Additional environment variables
+ {{- range $key, $value := .Values.environment }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+...
+{{- end }}
diff --git a/charts/openproject/templates/secret_memcached.yaml b/charts/openproject/templates/secret_memcached.yaml
new file mode 100644
index 0000000..a6a7dc6
--- /dev/null
+++ b/charts/openproject/templates/secret_memcached.yaml
@@ -0,0 +1,16 @@
+{{- if eq .Values.openproject.cache.store "memcache" }}
+---
+apiVersion: "v1"
+kind: "Secret"
+metadata:
+ name: "{{ include "common.names.fullname" . }}-memcached"
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+stringData:
+ {{- if .Values.memcached.bundled }}
+ OPENPROJECT_CACHE__MEMCACHE__SERVER: "{{ .Release.Name }}-memcached:11211"
+ {{- else }}
+ OPENPROJECT_CACHE__MEMCACHE__SERVER: "{{ .Values.memcached.connection.host }}:{{.Values.memcached.connection.port }}"
+ {{- end }}
+...
+{{- end }}
diff --git a/charts/openproject/templates/secret_oidc.yaml b/charts/openproject/templates/secret_oidc.yaml
new file mode 100644
index 0000000..03a16a8
--- /dev/null
+++ b/charts/openproject/templates/secret_oidc.yaml
@@ -0,0 +1,32 @@
+{{- if .Values.openproject.oidc.enabled }}
+---
+apiVersion: "v1"
+kind: "Secret"
+metadata:
+ name: "{{ include "common.names.fullname" . }}-oidc"
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+stringData:
+ # OpenID Connect settings
+ {{ $oidc_prefix := printf "OPENPROJECT_OPENID__CONNECT_%s" (upper .Values.openproject.oidc.provider) }}
+ {{ $oidc_prefix }}_DISPLAY__NAME: {{ .Values.openproject.oidc.displayName | quote }}
+ {{ $oidc_prefix }}_HOST: {{ .Values.openproject.oidc.host | quote }}
+ {{/* Fall back to '_' as the secret name if none is given. This way `lookup` returns null (a secret named '_' cannot exist), which it would not do for an empty string. */}}
+ {{ $secret := (lookup "v1" "Secret" .Release.Namespace (default "_" .Values.openproject.oidc.existingSecret)) | default (dict "data" dict) -}}
+ {{ $oidc_prefix }}_IDENTIFIER: {{
+ default .Values.openproject.oidc.identifier (get $secret.data .Values.openproject.oidc.secretKeys.identifier | b64dec) | quote
+ }}
+ {{ $oidc_prefix }}_SECRET: {{
+ default .Values.openproject.oidc.secret (get $secret.data .Values.openproject.oidc.secretKeys.secret | b64dec) | quote
+ }}
+ {{ $oidc_prefix }}_AUTHORIZATION__ENDPOINT: {{ .Values.openproject.oidc.authorizationEndpoint | quote }}
+ {{ $oidc_prefix }}_TOKEN__ENDPOINT: {{ .Values.openproject.oidc.tokenEndpoint | quote }}
+ {{ $oidc_prefix }}_USERINFO__ENDPOINT: {{ .Values.openproject.oidc.userinfoEndpoint | quote }}
+ {{ $oidc_prefix }}_END__SESSION__ENDPOINT: {{ .Values.openproject.oidc.endSessionEndpoint | quote }}
+ {{ $oidc_prefix }}_SCOPE: {{ .Values.openproject.oidc.scope | quote }}
+ {{- range $key, $value := .Values.openproject.oidc.attribute_map }}
+ {{ $mapping_key := printf "%s_ATTRIBUTE__MAP_%s" $oidc_prefix (upper $key) }}
+ {{ $mapping_key }}: {{ $value | quote }}
+ {{- end }}
+...
+{{- end }}
diff --git a/charts/openproject/templates/secret_s3.yaml b/charts/openproject/templates/secret_s3.yaml
new file mode 100644
index 0000000..354b01e
--- /dev/null
+++ b/charts/openproject/templates/secret_s3.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.s3.enabled }}
+---
+apiVersion: "v1"
+kind: "Secret"
+metadata:
+ name: "{{ include "common.names.fullname" . }}-s3"
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+stringData:
+ OPENPROJECT_ATTACHMENTS__STORAGE: fog
+ OPENPROJECT_FOG_CREDENTIALS_PROVIDER: AWS
+ {{/* Fall back to '_' as the secret name if none is given. This way `lookup` returns null (a secret named '_' cannot exist), which it would not do for an empty string. */}}
+ {{ $secret := (lookup "v1" "Secret" .Release.Namespace (default "_" .Values.s3.auth.existingSecret)) | default (dict "data" dict) -}}
+ OPENPROJECT_FOG_CREDENTIALS_AWS__ACCESS__KEY__ID: {{
+ default .Values.s3.auth.accessKeyId (get $secret.data .Values.s3.auth.secretKeys.accessKeyId | b64dec) | quote
+ }}
+ OPENPROJECT_FOG_CREDENTIALS_AWS__SECRET__ACCESS__KEY: {{
+ default .Values.s3.auth.secretAccessKey (get $secret.data .Values.s3.auth.secretKeys.secretAccessKey | b64dec) | quote
+ }}
+ {{ if .Values.s3.endpoint -}}
+ OPENPROJECT_FOG_CREDENTIALS_ENDPOINT: {{ .Values.s3.endpoint }}
+ {{- end }}
+ {{ if .Values.s3.host -}}
+ OPENPROJECT_FOG_CREDENTIALS_HOST: {{ .Values.s3.host }}
+ {{- end }}
+ {{ if .Values.s3.port -}}
+ OPENPROJECT_FOG_CREDENTIALS_PORT: "{{ .Values.s3.port }}"
+ {{- end }}
+ OPENPROJECT_FOG_DIRECTORY: {{ .Values.s3.bucketName }}
+ OPENPROJECT_FOG_CREDENTIALS_REGION: {{ .Values.s3.region }}
+ OPENPROJECT_FOG_CREDENTIALS_PATH__STYLE: "{{ .Values.s3.pathStyle }}"
+ OPENPROJECT_FOG_CREDENTIALS_AWS__SIGNATURE__VERSION: "{{ .Values.s3.signatureVersion }}"
+ # remove use_iam_profile fallback after some point
+ OPENPROJECT_FOG_CREDENTIALS_USE__IAM__PROFILE: {{ if or .Values.s3.use_iam_profile .Values.s3.useIamProfile }}"true"{{else}}"false"{{end}}
+ OPENPROJECT_FOG_CREDENTIALS_ENABLE__SIGNATURE__V4__STREAMING: {{ if .Values.s3.enableSignatureV4Streaming }}"true"{{else}}"false"{{end}}
+ OPENPROJECT_DIRECT__UPLOADS: {{ if .Values.s3.directUploads }}"true"{{else}}"false"{{end}}
+...
+{{- end }}
diff --git a/charts/openproject/templates/seeder-job.yaml b/charts/openproject/templates/seeder-job.yaml
new file mode 100644
index 0000000..84c0bc7
--- /dev/null
+++ b/charts/openproject/templates/seeder-job.yaml
@@ -0,0 +1,70 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "common.names.fullname" . }}-seeder-{{ now | date "20060102150405" }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+ {{- with .Values.seederJob.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ ttlSecondsAfterFinished: 6000
+ template:
+ metadata:
+ labels:
+ {{- include "common.labels.standard" . | nindent 8 }}
+ openproject/process: seeder
+ {{- with .Values.seederJob.annotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- include "openproject.imagePullSecrets" . | indent 6 }}
+ {{- include "openproject.podSecurityContext" . | indent 6 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{ toYaml . | nindent 8 | trim }}
+ {{- end }}
+ volumes:
+ {{- include "openproject.tmpVolumeSpec" . | indent 8 }}
+ {{- if .Values.persistence.enabled }}
+ - name: "data"
+ persistentVolumeClaim:
+ claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ include "common.names.fullname" . }}{{- end }}
+ {{- end }}
+ initContainers:
+ - name: check-db-ready
+ image: "{{ .Values.initdb.image.registry }}/{{ .Values.initdb.image.repository }}:{{ .Values.initdb.image.tag }}"
+ imagePullPolicy: {{ .Values.initdb.image.imagePullPolicy }}
+ command: [
+ 'sh',
+ '-c',
+ 'until pg_isready -h $DATABASE_HOST -p $DATABASE_PORT -U {{ .Values.postgresql.auth.username }}; do echo "waiting for database $DATABASE_HOST:$DATABASE_PORT"; sleep 2; done;'
+ ]
+ envFrom:
+ {{- include "openproject.envFrom" . | nindent 12 }}
+ env:
+ {{- include "openproject.env" . | nindent 12 }}
+ resources:
+ {{- toYaml .Values.initdb.resources | nindent 12 }}
+ {{- include "openproject.containerSecurityContext" . | indent 10 }}
+ containers:
+ - name: seeder
+ image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}{{ if .Values.image.sha256 }}@sha256:{{ .Values.image.sha256 }}{{ else }}:{{ .Values.image.tag }}{{ end }}"
+ imagePullPolicy: {{ .Values.image.imagePullPolicy }}
+ args:
+ - bash
+ - /app/docker/prod/seeder
+ envFrom:
+ {{- include "openproject.envFrom" . | nindent 12 }}
+ env:
+ {{- include "openproject.env" . | nindent 12 }}
+ volumeMounts:
+ {{- include "openproject.tmpVolumeMounts" . | indent 12 }}
+ {{- if .Values.persistence.enabled }}
+ - name: "data"
+ mountPath: "/var/openproject/assets"
+ {{- end }}
+ {{- include "openproject.containerSecurityContext" . | indent 10 }}
+ restartPolicy: OnFailure
diff --git a/charts/openproject/templates/service.yaml b/charts/openproject/templates/service.yaml
new file mode 100644
index 0000000..ac23b71
--- /dev/null
+++ b/charts/openproject/templates/service.yaml
@@ -0,0 +1,31 @@
+---
+{{- if or .Values.service.enabled .Values.ingress.enabled }}
+apiVersion: "v1"
+kind: "Service"
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ {{- if .Values.service.sessionAffinity.enabled }}
+ sessionAffinity: "ClientIP"
+ sessionAffinityConfig:
+ clientIP:
+ timeoutSeconds: {{ .Values.service.sessionAffinity.timeoutSeconds }}
+ {{- end }}
+ ports:
+ {{- range $key, $value := .Values.service.ports }}
+ - port: {{ $value.port }}
+ targetPort: {{ $key }}
+ protocol: {{ $value.protocol }}
+ name: {{ $key }}
+ {{- if and (eq $.Values.service.type "NodePort") $value.nodePort }}
+ nodePort: {{ $value.nodePort }}
+ {{- end }}
+ {{- end }}
+ selector:
+ {{- include "common.labels.matchLabels" . | nindent 4 }}
+ openproject/process: web
+{{- end }}
+...
diff --git a/charts/openproject/templates/serviceaccount.yaml b/charts/openproject/templates/serviceaccount.yaml
new file mode 100644
index 0000000..046ad71
--- /dev/null
+++ b/charts/openproject/templates/serviceaccount.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.serviceAccount.create -}}
+---
+apiVersion: "v1"
+kind: "ServiceAccount"
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+...
+{{- end }}
diff --git a/charts/openproject/templates/tests/test-connection.yaml b/charts/openproject/templates/tests/test-connection.yaml
new file mode 100644
index 0000000..bd63462
--- /dev/null
+++ b/charts/openproject/templates/tests/test-connection.yaml
@@ -0,0 +1,21 @@
+---
+apiVersion: "v1"
+kind: "Pod"
+metadata:
+ name: "{{ include "common.names.fullname" . }}-test-connection"
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: "wget"
+ image: "busybox"
+ command: ['wget']
+ args:
+ - '--no-verbose'
+ - '--tries=1'
+ - '--spider'
+ - '{{ include "common.names.fullname" . }}:{{ .Values.service.ports.http.port }}/health_check'
+ restartPolicy: "Never"
+...
diff --git a/charts/openproject/templates/web-deployment.yaml b/charts/openproject/templates/web-deployment.yaml
new file mode 100644
index 0000000..4918558
--- /dev/null
+++ b/charts/openproject/templates/web-deployment.yaml
@@ -0,0 +1,128 @@
+---
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+ name: {{ include "common.names.fullname" . }}-web
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+ openproject/process: web
+spec:
+ replicas: {{ .Values.replicaCount }}
+ strategy:
+ type: {{ .Values.strategy.type }}
+ selector:
+ matchLabels:
+ {{- include "common.labels.matchLabels" . | nindent 6 }}
+ openproject/process: web
+ template:
+ metadata:
+ annotations:
+ {{- range $key, $val := .Values.podAnnotations }}
+ {{ $key }}: {{ $val | quote }}
+ {{- end }}
+ {{- include "openproject.envChecksums" . | nindent 8 }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 8 }}
+ openproject/process: web
+ spec:
+ {{- include "openproject.imagePullSecrets" . | indent 6 }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{ toYaml . | nindent 8 | trim }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{ toYaml . | nindent 8 | trim }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{ toYaml . | nindent 8 | trim }}
+ {{- end }}
+ {{- include "openproject.podSecurityContext" . | indent 6 }}
+ serviceAccountName: {{ include "common.names.fullname" . }}
+ volumes:
+ {{- include "openproject.tmpVolumeSpec" . | indent 8 }}
+ {{- if .Values.egress.tls.rootCA.fileName }}
+ - name: ca-pemstore
+ configMap:
+ name: "{{- .Values.egress.tls.rootCA.configMap }}"
+ {{- end }}
+ {{- if .Values.persistence.enabled }}
+ - name: "data"
+ persistentVolumeClaim:
+ claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ include "common.names.fullname" . }}{{- end }}
+ {{- end }}
+ initContainers:
+ - name: wait-for-db
+ {{- include "openproject.containerSecurityContext" . | indent 10 }}
+ image: {{ include "openproject.image" . }}
+ imagePullPolicy: {{ .Values.image.imagePullPolicy }}
+ envFrom:
+ {{- include "openproject.envFrom" . | nindent 12 }}
+ env:
+ {{- include "openproject.env" . | nindent 12 }}
+ command:
+ - bash
+ - /app/docker/prod/wait-for-db
+ containers:
+ - name: "openproject"
+ {{- include "openproject.containerSecurityContext" . | indent 10 }}
+ image: {{ include "openproject.image" . }}
+ imagePullPolicy: {{ .Values.image.imagePullPolicy }}
+ envFrom:
+ {{- include "openproject.envFrom" . | nindent 12 }}
+ env:
+ {{- include "openproject.env" . | nindent 12 }}
+ command:
+ - bash
+ - /app/docker/prod/web
+ volumeMounts:
+ {{- include "openproject.tmpVolumeMounts" . | indent 12 }}
+ {{- if .Values.persistence.enabled }}
+ - name: "data"
+ mountPath: "/var/openproject/assets"
+ {{- end }}
+ {{- if .Values.egress.tls.rootCA.fileName }}
+ - name: ca-pemstore
+ mountPath: /etc/ssl/certs/custom-ca.pem
+ subPath: {{ .Values.egress.tls.rootCA.fileName }}
+ readOnly: false
+ {{- end }}
+ ports:
+ {{- range $key, $value := .Values.service.ports }}
+ - name: {{ $key }}
+ containerPort: {{ $value.containerPort }}
+ protocol: {{ $value.protocol }}
+ {{- end }}
+ {{- if .Values.probes.liveness.enabled }}
+ livenessProbe:
+ httpGet:
+ path: "{{ .Values.openproject.railsRelativeUrlRoot | default "" }}/health_checks/default"
+ port: 8080
+ httpHeaders:
+ # Host header required: the probe targets the Pod IP directly, and without it the health check may return 404 (e.g. in downstream variants using host-based routing)
+ - name: Host
+ value: localhost
+ initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
+ timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
+ periodSeconds: {{ .Values.probes.liveness.periodSeconds }}
+ failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
+ successThreshold: {{ .Values.probes.liveness.successThreshold }}
+ {{- end }}
+ {{- if .Values.probes.readiness.enabled }}
+ readinessProbe:
+ httpGet:
+ path: "{{ .Values.openproject.railsRelativeUrlRoot | default "" }}/health_checks/default"
+ port: 8080
+ httpHeaders:
+ # Host header required: the probe targets the Pod IP directly, and without it the health check may return 404 (e.g. in downstream variants using host-based routing)
+ - name: Host
+ value: localhost
+ initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
+ timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
+ periodSeconds: {{ .Values.probes.readiness.periodSeconds }}
+ failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
+ successThreshold: {{ .Values.probes.readiness.successThreshold }}
+ {{- end }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
diff --git a/charts/openproject/templates/worker-deployment.yaml b/charts/openproject/templates/worker-deployment.yaml
new file mode 100644
index 0000000..3d28d3d
--- /dev/null
+++ b/charts/openproject/templates/worker-deployment.yaml
@@ -0,0 +1,98 @@
+{{- range $workerName, $workerValues := .Values.workers }}
+{{- with $ -}}
+---
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+ name: {{ include "common.names.fullname" . }}-worker-{{ $workerName }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 4 }}
+ openproject/process: worker-{{ $workerName }}
+spec:
+ replicas: {{ (kindIs "invalid" $workerValues.replicas) | ternary .Values.backgroundReplicaCount $workerValues.replicas }}
+ strategy:
+ {{ coalesce $workerValues.strategy .Values.strategy | toYaml | nindent 4 }}
+ selector:
+ matchLabels:
+ {{- include "common.labels.matchLabels" . | nindent 6 }}
+ openproject/process: worker-{{ $workerName }}
+ template:
+ metadata:
+ annotations:
+ {{- range $key, $val := .Values.podAnnotations }}
+ {{ $key }}: {{ $val | quote }}
+ {{- end }}
+ {{- include "openproject.envChecksums" . | nindent 8 }}
+ labels:
+ {{- include "common.labels.standard" . | nindent 8 }}
+ openproject/process: worker-{{ $workerName }}
+ spec:
+ {{- include "openproject.imagePullSecrets" . | indent 6 }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{ toYaml . | nindent 8 | trim }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{ toYaml . | nindent 8 | trim }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{ toYaml . | nindent 8 | trim }}
+ {{- end }}
+ {{- include "openproject.podSecurityContext" . | indent 6 }}
+ serviceAccountName: {{ include "common.names.fullname" . }}
+ volumes:
+ {{- include "openproject.tmpVolumeSpec" . | indent 8 }}
+ {{- if .Values.egress.tls.rootCA.fileName }}
+ - name: ca-pemstore
+ configMap:
+ name: "{{- .Values.egress.tls.rootCA.configMap }}"
+ {{- end }}
+ {{- if .Values.persistence.enabled }}
+ - name: "data"
+ persistentVolumeClaim:
+ claimName: {{ include "common.names.fullname" . }}
+ {{- end }}
+ initContainers:
+ - name: wait-for-db
+ {{- include "openproject.containerSecurityContext" . | indent 10 }}
+ image: {{ include "openproject.image" . }}
+ imagePullPolicy: {{ .Values.image.imagePullPolicy }}
+ envFrom:
+ {{- include "openproject.envFrom" . | nindent 12 }}
+ env:
+ {{- include "openproject.env" . | nindent 12 }}
+ command:
+ - bash
+ - /app/docker/prod/wait-for-db
+ containers:
+ - name: "openproject"
+ {{- include "openproject.containerSecurityContext" . | indent 10 }}
+ image: {{ include "openproject.image" . }}
+ imagePullPolicy: {{ .Values.image.imagePullPolicy }}
+ envFrom:
+ {{- include "openproject.envFrom" . | nindent 12 }}
+ command:
+ - bash
+ - /app/docker/prod/worker
+ env:
+ {{- include "openproject.env" . | nindent 12 }}
+ - name: "QUEUE"
+ value: "{{ $workerValues.queues }}"
+ volumeMounts:
+ {{- include "openproject.tmpVolumeMounts" . | indent 12 }}
+ {{- if .Values.persistence.enabled }}
+ - name: "data"
+ mountPath: "/var/openproject/assets"
+ {{- end }}
+ {{- if .Values.egress.tls.rootCA.fileName }}
+ - name: ca-pemstore
+ mountPath: /etc/ssl/certs/custom-ca.pem
+ subPath: {{ .Values.egress.tls.rootCA.fileName }}
+ readOnly: false
+ {{- end }}
+ resources:
+ {{- coalesce $workerValues.resources .Values.resources | toYaml | nindent 12 }}
+{{- end }}
+{{ end }}
\ No newline at end of file
diff --git a/charts/openproject/values.yaml b/charts/openproject/values.yaml
new file mode 100644
index 0000000..d650bfe
--- /dev/null
+++ b/charts/openproject/values.yaml
@@ -0,0 +1,705 @@
+# Default values for openproject.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Enable development mode.
+##
+## Set this to true if you are working on the charts locally using
+## local clusters such as minikube or kind.
+##
+## This will set `OPENPROJECT_HTTPS` to `false` and avoid using volumes for
+## tmp folders as (permissions for) these don't work correctly in local clusters.
+develop: false
+
+global:
+ ## Credentials to fetch images from private registry.
+ ##
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ #
+ imagePullSecrets: []
+
+## Affinity for pod assignment.
+##
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+#
+affinity: {}
+
+## Define additional environment variables.
+##
+## You can get a list of all environment variables when executing:
+## "RAILS_ENV=production bundle exec rake setting:available_envs"
+##
+## environment:
+## OPENPROJECT_ATTACHMENT__MAX__SIZE: 5120
+#
+environment: {}
+
+## Provide a name to substitute for the full names of resources.
+#
+fullnameOverride: ""
+
+##
+# Override the cluster domain name used in templating
+clusterDomain: "cluster.local"
+
+## Define settings for wait-for-db init-container
+#
+initdb:
+ image:
+ ## Define docker registry address.
+ #
+ registry: "docker.io"
+
+ ## Define repository string.
+ #
+ repository: "postgres"
+
+ # Postgres version to use
+ tag: 13
+
+ ## Define a imagePullPolicy.
+ ##
+ ## Ref.: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
+ ##
+ ## "IfNotPresent" => The image is pulled only if it is not already present locally.
+ ## "Always" => Every time the kubelet launches a container, the kubelet queries the container image registry to
+ ## resolve the name to an image digest. If the kubelet has a container image with that exact digest cached
+ ## locally, the kubelet uses its cached image; otherwise, the kubelet pulls the image with the resolved
+ ## digest, and uses that image to launch the container.
+ ## "Never" => The kubelet does not try fetching the image. If the image is somehow already present locally, the
+ ## kubelet attempts to start the container; otherwise, startup fails
+ #
+ imagePullPolicy: "Always"
+
+ resources:
+ limits:
+ memory: "200Mi"
+ requests:
+ memory: "200Mi"
+
+## Define and create Kubernetes Service.
+##
+## Ref.: https://kubernetes.io/docs/concepts/services-networking/ingress/
+#
+ingress:
+ ## Whether to enable the Ingress or not.
+ #
+ enabled: true
+
+ ## Define the name of the ingress class.
+ ##
+ ## If left empty, the cluster default is used.
+ ## Set this if you need a specific class, for instance `nginx`.
+ #
+ ingressClassName:
+
+ ## Define custom ingress annotations:
+ ##
+ ## Example:
+ ## annotations:
+ ## nginx.ingress.kubernetes.io/rewrite-target: /
+ annotations: {}
+
+ ## Define the Fully Qualified Domain Name (FQDN) where OpenProject should be reachable.
+ #
+ host: "openproject.example.com"
+
+ ## Define the path for OpenProject on your host.
+ #
+ path: /
+
+ ## Each path in an Ingress is required to have a corresponding path type. Paths that do not include an explicit
+ ## pathType will fail validation. There are three supported path types:
+ ##
+ ## "ImplementationSpecific" => With this path type, matching is up to the IngressClass. Implementations can treat this
+ ## as a separate pathType or treat it identically to Prefix or Exact path types.
+ ## "Exact" => Matches the URL path exactly and with case sensitivity.
+ ## "Prefix" => Matches based on a URL path prefix split by /.
+ ##
+ ## Ref.: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types
+ #
+ pathType: "Prefix"
+
+ ## You can secure an Ingress by specifying a Secret that contains a TLS private key and certificate.
+ ##
+ ## Ref.: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+ #
+ tls:
+ ## Whether to enable tls or not.
+ #
+ enabled: true
+
+ ## The name of the kubernetes secret which contains a TLS private key and certificate.
+ ## Hint: This secret is not created by this chart and must be provided.
+ ##
+ #
+ secretName: ""
+
+egress:
+ tls:
+ rootCA:
+ configMap: ""
+ fileName: ""
+
+## Define image setting
+#
+image:
+ ## Define docker registry address.
+ #
+ registry: "docker.io"
+
+ ## Define repository string.
+ #
+ repository: "openproject/community"
+
+ ## Define a imagePullPolicy.
+ ##
+ ## Ref.: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
+ ##
+ ## "IfNotPresent" => The image is pulled only if it is not already present locally.
+ ## "Always" => Every time the kubelet launches a container, the kubelet queries the container image registry to
+ ## resolve the name to an image digest. If the kubelet has a container image with that exact digest cached
+ ## locally, the kubelet uses its cached image; otherwise, the kubelet pulls the image with the resolved
+ ## digest, and uses that image to launch the container.
+ ## "Never" => The kubelet does not try fetching the image. If the image is somehow already present locally, the
+ ## kubelet attempts to start the container; otherwise, startup fails
+ #
+ imagePullPolicy: "Always"
+
+ ## Define image tag.
+ ## For the helm chart, use the `-slim` variants as the all-in-one container is not compatible
+ ## with some of the options (non-root execution, password splitting, etc.) and is inefficient for using in helm
+## due to it embedding a number of services.
+ tag: "13-slim"
+
+ ## Define image sha256 - mutual exclusive with image tag.
+## The sha256 has a higher precedence than the tag.
+ # sha256:
+
+## Credentials to fetch images from private registry.
+##
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+##
+## imagePullSecrets:
+## - myRegistryKeySecretName
+#
+imagePullSecrets: []
+
+## Configure memcached settings.
+#
+memcached:
+ ## When set to true, a memcached will be deployed into current namespace, when false you have to provide your own
+ ## memcached instance.
+ #
+ bundled: true
+
+ global:
+ containerSecurityContext:
+ enabled: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - "ALL"
+ seccompProfile:
+ type: "RuntimeDefault"
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+
+ ## When "bundled" is set to false, you need to define the memcached connection details.
+ #
+ connection:
+ host:
+ port:
+
+## String to partially override release name.
+#
+nameOverride: ""
+
+## Node labels for pod assignment.
+##
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+#
+nodeSelector: {}
+
+## Deployment strategy
+##
+## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+#
+strategy:
+ ## Re-create pod during deployments by default since a writable volume is mounted.
+ ## Should your cluster support WriteMany volumes, you can change this
+ ## to `RollingUpdate`.
+ type: "Recreate"
+
+# Define the workers to run, their queues, replicas, strategy, and resources
+workers:
+ default:
+ queues: ""
+ replicas: 1
+ strategy:
+ type: "Recreate"
+ resources:
+ requests:
+ memory: "512Mi"
+ cpu: "250m"
+ limits:
+ memory: "4Gi"
+ cpu: "4"
+
+## OpenProject related settings.
+##
+## Ref.: https://www.openproject.org/docs/installation-and-operations/configuration/environment/#supported-environment-variables
+#
+openproject:
+ ## Enable https in backend response.
+ #
+ https: true
+
+ ## Define the host, defaults to value of "ingress.host".
+ #
+ host:
+
+ ## Enable HSTS.
+ #
+ hsts: true
+
+ ## Define Cache settings.
+ #
+ cache:
+ store: "memcache"
+
+ extraEnvVarsSecret: ""
+
+ ## Define the language to seed the instance in
+ #
+ seed_locale: "en"
+
+ ##
+ # Let OpenProject run in a subdirectory,
+ # e.g., https://example.com/openproject
+ # specify with leading slash, but without trailing slash
+ # e.g., /openproject
+ railsRelativeUrlRoot:
+
+ ## Define admin user details
+ # only applicable on first installation
+ # Note: Only applicable for versions >= 13.0
+ admin_user:
+ password: "admin"
+ password_reset: "true"
+ name: "OpenProject Admin"
+ mail: "admin@example.net"
+
+ ## Define OpenID Connect providers
+ oidc:
+ enabled: false
+ provider: "Keycloak"
+ displayName: "Keycloak"
+ host: ""
+ identifier: ""
+ secret: ""
+ authorizationEndpoint: ""
+ tokenEndpoint: ""
+ userinfoEndpoint: ""
+ endSessionEndpoint: ""
+ scope: "[openid]"
+
+ # Optional attribute mappings from the id token
+ attribute_map: {}
+
+ ## To avoid having sensitive credentials in your values.yaml, the preferred way is to
+ ## use an existing secret containing the OIDC compatible access credentials.
+ ## Specify the name of this existing secret here.
+ existingSecret:
+
+ ## In case your secret does not use the default keys in the secret, you can adjust them here.
+ secretKeys:
+ identifier: "clientId"
+ secret: "clientSecret"
+
+ # Allows usage of sealed-secret for `identifier` and `secret` values.
+ # Special use case for use in setups where the helm template `lookup` function is not available.
+ # Ref: https://github.com/argoproj/argo-cd/issues/5202
+ #
+ extraOidcSealedSecret:
+
+ ## Modify PostgreSQL statement timeout.
+ ## Increase in case you get errors such as "ERROR: canceling statement due to statement timeout".
+ ##
+ ## Ref.: https://www.openproject.org/docs/installation-and-operations/configuration/environment/#postgresql-statement_timeout
+ #
+ postgresStatementTimeout: 120s
+
+ ## Whether or not to use ephemeral volumes for /app/tmp and /tmp.
+ ## Falls back to a sensible default if undefined.
+ #
+ useTmpVolumes:
+
+ ## customize the tmp storage mount sizes
+ tmpVolumesStorage: "5Gi"
+
+## Whether to allocate persistent volume disk for the data directory.
+## In case of node failure, the node data directory will still persist.
+##
+## Ref.: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
+#
+persistence:
+ enabled: true
+
+ ## Define the volume access modes:
+ ##
+ ## "ReadWriteOnce" => The volume can be mounted as read-write by a single node. ReadWriteOnce access mode still can
+ ## allow multiple pods to access the volume when the pods are running on the same node.
+ ## "ReadOnlyMany" => The volume can be mounted as read-only by many nodes.
+ ## "ReadWriteMany" => The volume can be mounted as read-write by many nodes.
+ ## "ReadWriteOncePod" => The volume can be mounted as read-write by a single Pod. Use ReadWriteOncePod access mode if
+ ## you want to ensure that only one pod across whole cluster can read that PVC or write to it.
+ #
+ accessModes:
+ - "ReadWriteMany"
+
+ ## Define custom storage (PVC) annotations:
+ ##
+ annotations: {}
+
+ ## Define the volume size.
+ #
+ size: "1Gi"
+
+ ## Define the class of PV.
+ storageClassName:
+
+## Whether to use an S3-compatible object storage to store OpenProject attachments.
+## If this is enabled, files will NOT be stored in the mounted volume configured in `persistence` above.
+## The volume will not be used at all, so `persistence.enabled` should be set to `false` in this case.
+##
+## Ref.: https://www.openproject.org/docs/installation-and-operations/configuration/#attachments-storage
+#
+s3:
+ enabled: false
+
+ auth:
+ # Provide the accessKeyId and secret in plain values
+ # We recommend to use the existing
+ # Secret option instead
+ accessKeyId:
+ secretAccessKey:
+
+ ## To avoid having sensitive credentials in your values.yaml, the preferred way is to
+ ## use an existing secret containing the S3 compatible access credentials.
+ ## Specify the name of this existing secret here.
+ existingSecret:
+
+ ## In case your secret does not use the default keys in the secret, you can adjust them here.
+ secretKeys:
+ accessKeyId: "accessKeyId"
+ secretAccessKey: "secretAccessKey"
+
+ region:
+ bucketName:
+
+ ## Remove or leave empty to use default AWS S3 endpoint
+ #
+ endpoint:
+ host:
+ port:
+ pathStyle: false
+ signatureVersion: 4
+ useIamProfile: false
+ # Some providers do not properly support signature v4 streaming (e.g. Scaleway)
+ enableSignatureV4Streaming: true
+
+ ## If enabled, upload files directly to S3 from the browser instead of going through OpenProject.
+ ## May not be supported by providers other than AWS S3 itself.
+ ##
+ ## Ref.: https://www.openproject.org/docs/installation-and-operations/configuration/#direct-uploads
+ #
+ directUploads: true
+
+ ## You can always override these options via the environment, for instance:
+ ##
+ ## environment:
+ ## OPENPROJECT_FOG_CREDENTIALS_REGION: 'us-east-1'
+ ##
+ ## Ref.: https://www.openproject.org/docs/installation-and-operations/configuration/#attachments-storage
+
+## Define custom pod annotations.
+#
+podAnnotations: {}
+
+## Pod Security Context.
+##
+## We use the default value of `1000` for `fsGroup` since that
+## is the app user's group ID and if the user wants to be able to
+## write to `/var/openproject/assets` the mounted folder needs to
+## have a matching gid.
+##
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+#
+podSecurityContext:
+ enabled: true
+ fsGroup: 1000
+
+## Container security context using as a default best practice values
+## granting minimum privileges.
+##
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+#
+containerSecurityContext:
+ enabled: true
+ runAsUser: 1000
+ runAsGroup: 1000
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - "ALL"
+ seccompProfile:
+ type: "RuntimeDefault"
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+
+## Configure PostgreSQL settings.
+#
+postgresql:
+ ## When set to true, a postgres server will be deployed into current namespace, when false you have to provide your
+ ## own database instance.
+ #
+ bundled: true
+
+ global:
+ containerSecurityContext:
+ enabled: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - "ALL"
+ seccompProfile:
+ type: "RuntimeDefault"
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+
+ ## When "bundled" is set to false, you need to define the database connection details.
+ #
+ connection:
+ host:
+ port:
+
+ ## Database auth details.
+ #
+ auth:
+ ## To avoid having sensitive credentials in your values.yaml, the preferred way to provide a password
+ ## is to use an existing secret containing the PostgreSQL credentials.
+ ## Specify the name of this existing secret here.
+ ##
+ ## If neither an existing secret nor passwords are defined, a secret is generated automatically.
+ ##
+ ## The postgresql chart will create this secret (the name of which ends with `-postgresql` by default)
+ ## with generated user and admin passwords.
+ ## If you want to see the base64 encoded passwords you can output the secret like this:
+ ##
+ ## ```
+ ## kubectl get secret -n <namespace> openproject-postgresql -o yaml | grep password
+ ## ```
+ #
+ existingSecret: ""
+
+ ## In case your secret does not use the default keys in the secret, you can adjust them here.
+ ##
+ ## secretKeys:
+ ## adminPasswordKey: "postgres-password"
+ ## userPasswordKey: "password"
+
+ ## Database username.
+ #
+ username: "openproject"
+
+ ## Database name.
+ #
+ database: "openproject"
+
+ ## If you are not using a Kubernetes secret to store your postgresql credentials,
+ ## you can specify them here if you really must. Please handle with care!
+
+ ## Database password.
+ #
+ password: ""
+
+ ## Database root password.
+ #
+ postgresPassword: ""
+
+ ## When using the "bundled" postgresql chart, you can configure the storageClass and other settings similar to this
+ ## Ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml
+ #
+ # global:
+ # storageClass: my-storage-class-name
+
+## Configure liveness and readiness probes.
+##
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+#
+probes:
+ ## Configure liveness probe.
+ ##
+ ## If the liveness probe fails, the container will be restarted.
+ #
+ liveness:
+ ## Whether to enable liveness probes.
+ #
+ enabled: true
+
+ ## Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
+ ## Defaults to 0 seconds. Minimum value is 0.
+ #
+ initialDelaySeconds: 120
+
+ ## Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1.
+ #
+ timeoutSeconds: 3
+
+ ## How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ #
+ periodSeconds: 30
+
+ ## When a probe fails, Kubernetes will try failureThreshold times before giving up. Giving up in case of liveness
+ ## probe means restarting the container. In case of readiness probe the Pod will be marked Unready. Defaults to 3.
+ ## Minimum value is 1.
+ #
+ failureThreshold: 3
+
+ ## Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1.
+ ## Must be 1 for liveness and startup Probes. Minimum value is 1.
+ #
+ successThreshold: 1
+
+ ## Configure readiness probe.
+ ##
+## If the readiness probe fails, no traffic will be routed to the container.
+ #
+ readiness:
+ ## Whether to enable readiness probes.
+ #
+ enabled: true
+
+ ## Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
+ ## Defaults to 0 seconds. Minimum value is 0.
+ #
+ initialDelaySeconds: 30
+
+ ## Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1.
+ #
+ timeoutSeconds: 3
+
+ ## How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ #
+ periodSeconds: 15
+
+ ## When a probe fails, Kubernetes will try failureThreshold times before giving up. Giving up in case of liveness
+ ## probe means restarting the container. In case of readiness probe the Pod will be marked Unready. Defaults to 3.
+ ## Minimum value is 1.
+ #
+ failureThreshold: 30
+
+ ## Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1.
+ ## Must be 1 for liveness and startup Probes. Minimum value is 1.
+ #
+ successThreshold: 1
+
+## Number of OpenProject web process replicas.
+#
+replicaCount: 1
+
+## Number of OpenProject background worker process replicas.
+#
+backgroundReplicaCount: 1
+
+## Configure resource requests and limits.
+##
+## http://kubernetes.io/docs/user-guide/compute-resources/
+#
+resources:
+ requests:
+ memory: "512Mi"
+ cpu: "250m"
+ limits:
+ memory: "4Gi"
+ cpu: "4"
+
+## Define and create Kubernetes Service.
+##
+## Ref.: https://kubernetes.io/docs/concepts/services-networking/service
+#
+service:
+ ## Whether to enable the Service or not.
+ #
+ enabled: true
+
+ ## Choose the kind of Service:
+ ##
+ ## "ClusterIP" => Exposes the Service on a cluster-internal IP. Choosing this value makes the Service only reachable
+ ## from within the cluster. This is the default that is used if you don't explicitly specify a type for
+ ## a Service.
+ ## "NodePort" => Exposes the Service on each Node's IP at a static port (the NodePort). To make the node port
+ ## available, Kubernetes sets up a cluster IP address, the same as if you had requested a Service of
+ ## type: ClusterIP.
+ ## "LoadBalancer" => Exposes the Service externally using a cloud provider's load balancer.
+ ##
+ ## Ref.: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ #
+ type: "ClusterIP"
+
+ ## Define the ports of Service.
+ ## You can set the port value to an arbitrary value, it will map the container port by name.
+ ##
+ ## Custom NodePort example:
+ ## ports:
+ ## http:
+ ## port: 8080
+ ## protocol: "TCP"
+ ## nodePort: "38080"
+ #
+ ports:
+ http:
+ containerPort: 8080
+ port: 8080
+ protocol: "TCP"
+
+ ## Configure session affinity to hit the same backend for the period specified in `timeoutSeconds`.
+ ##
+ ## Ref.: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity
+ #
+ sessionAffinity:
+ ## Whether to enable session affinity or not.
+ #
+ enabled: false
+ ## The session duration in seconds.
+ #
+ timeoutSeconds: 10800
+
+## Define Service Accounts for Pods.
+##
+## Ref.: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+#
+serviceAccount:
+ ## Whether to create service account.
+ #
+ create: true
+
+ ## Define custom service account annotations.
+ #
+ annotations: {}
+
+# Options for the seeder job
+seederJob:
+ ## Define custom seeder job annotations.
+ #
+ annotations: {}
+
+## Tolerations for pod assignment.
+##
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+#
+tolerations: []
diff --git a/charts/penpot/.helmignore b/charts/penpot/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/penpot/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/penpot/Chart.lock b/charts/penpot/Chart.lock
new file mode 100644
index 0000000..cb02383
--- /dev/null
+++ b/charts/penpot/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: postgresql
+ repository: https://charts.bitnami.com/bitnami
+ version: 12.1.9
+- name: redis
+ repository: https://charts.bitnami.com/bitnami
+ version: 17.6.0
+digest: sha256:6d03e5f311bc795d79fb37b7c2e7482daceba7f2ae20a22edc79ca0e33fcd88f
+generated: "2023-01-24T07:40:53.346006552Z"
diff --git a/charts/penpot/Chart.yaml b/charts/penpot/Chart.yaml
new file mode 100644
index 0000000..74d0384
--- /dev/null
+++ b/charts/penpot/Chart.yaml
@@ -0,0 +1,26 @@
+apiVersion: v2
+appVersion: 1.16.0-beta
+dependencies:
+- condition: global.postgresqlEnabled
+ name: postgresql
+ repository: https://charts.bitnami.com/bitnami
+ version: 12.x.x
+- condition: global.redisEnabled
+ name: redis
+ repository: https://charts.bitnami.com/bitnami
+ version: 17.x.x
+description: CodeChem Penpot Helm Chart
+home: https://github.com/codechem/helm/tree/main/charts/penpot
+icon: https://avatars.githubusercontent.com/u/30179644?s=200&v=4
+keywords:
+- kubernetes
+- penpot
+- penpotapp
+- design
+maintainers:
+- name: codechem
+ url: https://codechem.com
+name: penpot
+sources:
+- https://github.com/penpot/penpot
+version: 1.0.10
diff --git a/charts/penpot/README.md b/charts/penpot/README.md
new file mode 100644
index 0000000..b4099a4
--- /dev/null
+++ b/charts/penpot/README.md
@@ -0,0 +1,309 @@
+# Penpot
+
+Penpot is the first Open Source design and prototyping platform meant for cross-domain teams. Not dependent on operating systems, Penpot is web based and works with open standards (SVG). Penpot invites designers all over the world to fall in love with open source while getting developers excited about the design process in return.
+
+## TL;DR
+
+```console
+helm repo add codechem https://charts.codechem.com
+helm install penpot codechem/penpot
+```
+
+## Introduction
+
+Penpot makes design and prototyping accessible to every team in the world. It has a clear focus on design and code teams and its capabilities reflect exactly that. The less hand-off mindset, the more fun for everyone. Being web based, Penpot is not dependent on operating systems or local installations, you will only need to run a modern browser. Using SVG as no other design and prototyping tool does, Penpot files sport compatibility with most of the vectorial tools, are tech friendly and extremely easy to use on the web. It makes sure you will always own your work.
+
+## Prerequisites
+
+- Kubernetes 1.18+
+- Helm 3.2.0+
+
+## Installing the Chart
+
+To install the chart with the release name `penpot`:
+
+```console
+helm install penpot codechem/penpot
+```
+
+The command deploys penpot on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `penpot` deployment:
+
+```console
+helm delete penpot
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| -------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
+| `global.postgresqlEnabled` | Whether to deploy the Bitnami PostgreSQL chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/postgresql) for configuration. | `false` |
+| `global.redisEnabled` | Whether to deploy the Bitnami Redis chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/redis) for configuration. | `false` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array. | `[]` |
+
+
+### Common parameters
+
+| Name | Description | Value |
+| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------ |
+| `nameOverride` | String to partially override common.names.fullname | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname | `""` |
+| `serviceAccount.enabled` | Specifies whether a ServiceAccount should be created. | `true` |
+| `serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. If not set and enabled is true, a name is generated using the fullname template. | `""` |
+
+
+### Backend parameters
+
+| Name | Description | Value |
+| ----------------------------------------------------------- | ------------------------------------------------------------------ | ------------------- |
+| `backend.image.repository` | The Docker repository to pull the image from. | `penpotapp/backend` |
+| `backend.image.tag` | The image tag to use. | `1.16.0-beta` |
+| `backend.image.imagePullPolicy` | The image pull policy to use. | `IfNotPresent` |
+| `backend.replicaCount` | The number of replicas to deploy. | `1` |
+| `backend.service.type` | The service type to create. | `ClusterIP` |
+| `backend.service.port` | The service port to use. | `6060` |
+| `backend.podSecurityContext.enabled` | Enabled Penpot pods' security context | `true` |
+| `backend.podSecurityContext.fsGroup` | Set Penpot pod's security context fsGroup | `1001` |
+| `backend.containerSecurityContext.enabled` | Enabled Penpot containers' security context | `true` |
+| `backend.containerSecurityContext.runAsUser` | Set Penpot containers' security context runAsUser | `1001` |
+| `backend.containerSecurityContext.allowPrivilegeEscalation` | Set Penpot containers' security context allowPrivilegeEscalation | `false` |
+| `backend.containerSecurityContext.capabilities.drop` | Set Penpot containers' security context capabilities to be dropped | `["all"]` |
+| `backend.containerSecurityContext.readOnlyRootFilesystem` | Set Penpot containers' security context readOnlyRootFilesystem | `false` |
+| `backend.containerSecurityContext.runAsNonRoot` | Set Penpot container's security context runAsNonRoot | `true` |
+| `backend.affinity` | Affinity for Penpot pods assignment | `{}` |
+| `backend.nodeSelector` | Node labels for Penpot pods assignment | `{}` |
+| `backend.tolerations` | Tolerations for Penpot pods assignment | `[]` |
+| `backend.resources.limits` | The resources limits for the Penpot backend containers | `{}` |
+| `backend.resources.requests` | The requested resources for the Penpot backend containers | `{}` |
+
+
+### Frontend parameters
+
+| Name | Description | Value |
+| -------------------------------- | ---------------------------------------------------------- | -------------------- |
+| `frontend.image.repository` | The Docker repository to pull the image from. | `penpotapp/frontend` |
+| `frontend.image.tag` | The image tag to use. | `1.16.0-beta` |
+| `frontend.image.imagePullPolicy` | The image pull policy to use. | `IfNotPresent` |
+| `frontend.replicaCount` | The number of replicas to deploy. | `1` |
+| `frontend.service.type` | The service type to create. | `ClusterIP` |
+| `frontend.service.port` | The service port to use. | `80` |
+| `frontend.ingress.enabled` | Enable ingress record generation for Penpot frontend. | `false` |
+| `frontend.ingress.annotations` | Mapped annotations for the frontend ingress. | `{}` |
+| `frontend.ingress.hosts` | Array style hosts for the frontend ingress. | `[]` |
+| `frontend.ingress.tls` | Array style TLS secrets for the frontend ingress. | `[]` |
+| `frontend.affinity` | Affinity for Penpot pods assignment | `{}` |
+| `frontend.nodeSelector` | Node labels for Penpot pods assignment | `{}` |
+| `frontend.tolerations` | Tolerations for Penpot pods assignment | `[]` |
+| `frontend.resources.limits` | The resources limits for the Penpot frontend containers | `{}` |
+| `frontend.resources.requests` | The requested resources for the Penpot frontend containers | `{}` |
+
+
+### Exporter parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------------ | ------------------------------------------------------------------ | -------------------- |
+| `exporter.image.repository` | The Docker repository to pull the image from. | `penpotapp/exporter` |
+| `exporter.image.tag` | The image tag to use. | `1.16.0-beta` |
+| `exporter.image.imagePullPolicy` | The image pull policy to use. | `IfNotPresent` |
+| `exporter.replicaCount` | The number of replicas to deploy. | `1` |
+| `exporter.service.type` | The service type to create. | `ClusterIP` |
+| `exporter.service.port` | The service port to use. | `6061` |
+| `exporter.podSecurityContext.enabled` | Enabled Penpot pods' security context | `true` |
+| `exporter.podSecurityContext.fsGroup` | Set Penpot pod's security context fsGroup | `1001` |
+| `exporter.containerSecurityContext.enabled` | Enabled Penpot containers' security context | `true` |
+| `exporter.containerSecurityContext.runAsUser` | Set Penpot containers' security context runAsUser | `1001` |
+| `exporter.containerSecurityContext.allowPrivilegeEscalation` | Set Penpot containers' security context allowPrivilegeEscalation | `false` |
+| `exporter.containerSecurityContext.capabilities.drop` | Set Penpot containers' security context capabilities to be dropped | `["all"]` |
+| `exporter.containerSecurityContext.readOnlyRootFilesystem` | Set Penpot containers' security context readOnlyRootFilesystem | `false` |
+| `exporter.containerSecurityContext.runAsNonRoot` | Set Penpot container's security context runAsNonRoot | `true` |
+| `exporter.affinity` | Affinity for Penpot pods assignment | `{}` |
+| `exporter.nodeSelector` | Node labels for Penpot pods assignment | `{}` |
+| `exporter.tolerations` | Tolerations for Penpot pods assignment | `[]` |
+| `exporter.resources.limits` | The resources limits for the Penpot exporter containers | `{}` |
+| `exporter.resources.requests` | The requested resources for the Penpot exporter containers | `{}` |
+
+
+### Persistence parameters
+
+| Name | Description | Value |
+| --------------------------- | --------------------------------------------------- | ------------------- |
+| `persistence.enabled` | Enable persistence using Persistent Volume Claims. | `false` |
+| `persistence.storageClass` | Persistent Volume storage class. | `""` |
+| `persistence.size` | Persistent Volume size. | `8Gi` |
+| `persistence.existingClaim` | The name of an existing PVC to use for persistence. | `""` |
+| `persistence.accessModes` | Persistent Volume access modes. | `["ReadWriteOnce"]` |
+| `persistence.annotations` | Persistent Volume Claim annotations. | `{}` |
+
+
+### Configuration parameters
+
+| Name | Description | Value |
+| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
+| `config.publicURI` | The public domain to serve Penpot on. Set `disable-secure-session-cookies` in the flags if you plan on serving it on a non HTTPS domain. | `http://localhost:8080` |
+| `config.flags` | The feature flags to enable. Check [the official docs](https://help.penpot.app/technical-guide/configuration/) for more info. | `enable-registration enable-login disable-demo-users disable-demo-warning` |
+| `config.apiSecretKey` | A random secret key needed for persistent user sessions. Generate with `openssl rand -hex 16` for example. | `b46a12cb4bedc6b9df8cb3f18c708b65` |
+| `config.postgresql.host` | The PostgreSQL host to connect to. | `postgresql.penpot.svc.cluster.local` |
+| `config.postgresql.port` | The PostgreSQL host port to use. | `5432` |
+| `config.postgresql.database` | The PostgreSQL database to use. | `""` |
+| `config.postgresql.username` | The database username to use. | `""` |
+| `config.postgresql.password`                        | The database password to use.                                                                                                                                                                                                        | `""`                                                                        |
+| `config.postgresql.existingSecret` | The name of an existing secret. | `""` |
+| `config.postgresql.secretKeys.usernameKey` | The username key to use from an existing secret. | `""` |
+| `config.postgresql.secretKeys.passwordKey` | The password key to use from an existing secret. | `""` |
+| `config.redis.host` | The Redis host to connect to. | `redis-headless.penpot.svc.cluster.local` |
+| `config.redis.port` | The Redis host port to use. | `6379` |
+| `config.redis.database` | The Redis database to connect to. | `0` |
+| `config.assets.storageBackend` | The storage backend for assets to use. Use `assets-fs` for filesystem, and `assets-s3` for S3. | `assets-fs` |
+| `config.assets.filesystem.directory` | The storage directory to use if you chose the filesystem storage backend. | `/opt/data/assets` |
+| `config.assets.s3.accessKeyID` | The S3 access key ID to use if you chose the S3 storage backend. | `""` |
+| `config.assets.s3.secretAccessKey` | The S3 secret access key to use if you chose the S3 storage backend. | `""` |
+| `config.assets.s3.region` | The S3 region to use if you chose the S3 storage backend. | `""` |
+| `config.assets.s3.bucket` | The name of the S3 bucket to use if you chose the S3 storage backend. | `""` |
+| `config.assets.s3.endpointURI` | The S3 endpoint URI to use if you chose the S3 storage backend. | `""` |
+| `config.assets.s3.existingSecret` | The name of an existing secret. | `""` |
+| `config.assets.s3.secretKeys.accessKeyIDKey` | The S3 access key ID to use from an existing secret. | `""` |
+| `config.assets.s3.secretKeys.secretAccessKey` | The S3 secret access key to use from an existing secret. | `""` |
+| `config.assets.s3.secretKeys.endpointURIKey` | The S3 endpoint URI to use from an existing secret. | `""` |
+| `config.telemetryEnabled` | Whether to enable sending of anonymous telemetry data. | `true` |
+| `config.smtp.enabled` | Whether to enable SMTP configuration. You also need to add the 'enable-smtp' flag to the PENPOT_FLAGS variable. | `false` |
+| `config.smtp.defaultFrom` | The SMTP default email to send from. | `""` |
+| `config.smtp.defaultReplyTo` | The SMTP default email to reply to. | `""` |
+| `config.smtp.host` | The SMTP host to use. | `""` |
+| `config.smtp.port` | The SMTP host port to use. | `""` |
+| `config.smtp.username` | The SMTP username to use. | `""` |
+| `config.smtp.password` | The SMTP password to use. | `""` |
+| `config.smtp.tls` | Whether to use TLS for the SMTP connection. | `true` |
+| `config.smtp.ssl` | Whether to use SSL for the SMTP connection. | `false` |
+| `config.smtp.existingSecret` | The name of an existing secret. | `""` |
+| `config.smtp.secretKeys.usernameKey` | The SMTP username to use from an existing secret. | `""` |
+| `config.smtp.secretKeys.passwordKey` | The SMTP password to use from an existing secret. | `""` |
+| `config.registrationDomainWhitelist` | Comma separated list of allowed domains to register. Empty to allow all domains. | `""` |
+| `config.providers.google.enabled` | Whether to enable Google configuration. To enable Google auth, add `enable-login-with-google` to the flags. | `false` |
+| `config.providers.google.clientID` | The Google client ID to use. To enable Google auth, add `enable-login-with-google` to the flags. | `""` |
+| `config.providers.google.clientSecret` | The Google client secret to use. To enable Google auth, add `enable-login-with-google` to the flags. | `""` |
+| `config.providers.github.enabled` | Whether to enable GitHub configuration. To enable GitHub auth, also add `enable-login-with-github` to the flags. | `false` |
+| `config.providers.github.clientID` | The GitHub client ID to use. | `""` |
+| `config.providers.github.clientSecret` | The GitHub client secret to use. | `""` |
+| `config.providers.gitlab.enabled` | Whether to enable GitLab configuration. To enable GitLab auth, also add `enable-login-with-gitlab` to the flags. | `false` |
+| `config.providers.gitlab.baseURI` | The GitLab base URI to use. | `https://gitlab.com` |
+| `config.providers.gitlab.clientID` | The GitLab client ID to use. | `""` |
+| `config.providers.gitlab.clientSecret` | The GitLab client secret to use. | `""` |
+| `config.providers.oidc.enabled` | Whether to enable OIDC configuration. To enable OpenID Connect auth, also add `enable-login-with-oidc` to the flags. | `false` |
+| `config.providers.oidc.baseURI` | The OpenID Connect base URI to use. | `""` |
+| `config.providers.oidc.clientID` | The OpenID Connect client ID to use. | `""` |
+| `config.providers.oidc.clientSecret` | The OpenID Connect client secret to use. | `""` |
+| `config.providers.oidc.authURI` | Optional OpenID Connect auth URI to use. Auto discovered if not provided. | `""` |
+| `config.providers.oidc.tokenURI` | Optional OpenID Connect token URI to use. Auto discovered if not provided. | `""` |
+| `config.providers.oidc.userURI` | Optional OpenID Connect user URI to use. Auto discovered if not provided. | `""` |
+| `config.providers.oidc.roles`                       | Optional OpenID Connect roles to use. If no role is provided, roles checking is disabled.                                                                                                                                             | `role1 role2`                                                               |
+| `config.providers.oidc.rolesAttribute` | Optional OpenID Connect roles attribute to use. If not provided, the roles checking will be disabled. | `""` |
+| `config.providers.oidc.scopes`                      | Optional OpenID Connect scopes to use. This setting allows overwriting the required scopes, use with caution because penpot requires at least `name` and `email` attrs found on the user info. Optional, defaults to `openid profile`. | `scope1 scope2`                                                             |
+| `config.providers.oidc.nameAttribute` | Optional OpenID Connect name attribute to use. If not provided, the `name` prop will be used. | `""` |
+| `config.providers.oidc.emailAttribute` | Optional OpenID Connect email attribute to use. If not provided, the `email` prop will be used. | `""` |
+| `config.providers.ldap.enabled` | Whether to enable LDAP configuration. To enable LDAP, also add `enable-login-with-ldap` to the flags. | `false` |
+| `config.providers.ldap.host` | The LDAP host to use. | `ldap` |
+| `config.providers.ldap.port` | The LDAP port to use. | `10389` |
+| `config.providers.ldap.ssl` | Whether to use SSL for the LDAP connection. | `false` |
+| `config.providers.ldap.startTLS` | Whether to utilize StartTLS for the LDAP connection. | `false` |
+| `config.providers.ldap.baseDN` | The LDAP base DN to use. | `ou=people,dc=planetexpress,dc=com` |
+| `config.providers.ldap.bindDN` | The LDAP bind DN to use. | `cn=admin,dc=planetexpress,dc=com` |
+| `config.providers.ldap.bindPassword` | The LDAP bind password to use. | `GoodNewsEveryone` |
+| `config.providers.ldap.attributesUsername` | The LDAP attributes username to use. | `uid` |
+| `config.providers.ldap.attributesEmail` | The LDAP attributes email to use. | `mail` |
+| `config.providers.ldap.attributesFullname` | The LDAP attributes fullname to use. | `cn` |
+| `config.providers.ldap.attributesPhoto` | The LDAP attributes photo format to use. | `jpegPhoto` |
+| `config.providers.existingSecret` | The name of an existing secret to use. | `""` |
+| `config.providers.secretKeys.googleClientIDKey` | The Google client ID key to use from an existing secret. | `""` |
+| `config.providers.secretKeys.googleClientSecretKey` | The Google client secret key to use from an existing secret. | `""` |
+| `config.providers.secretKeys.githubClientIDKey` | The GitHub client ID key to use from an existing secret. | `""` |
+| `config.providers.secretKeys.githubClientSecretKey` | The GitHub client secret key to use from an existing secret. | `""` |
+| `config.providers.secretKeys.gitlabClientIDKey` | The GitLab client ID key to use from an existing secret. | `""` |
+| `config.providers.secretKeys.gitlabClientSecretKey` | The GitLab client secret key to use from an existing secret. | `""` |
+| `config.providers.secretKeys.oidcClientIDKey` | The OpenID Connect client ID key to use from an existing secret. | `""` |
+| `config.providers.secretKeys.oidcClientSecretKey` | The OpenID Connect client secret key to use from an existing secret. | `""` |
+
+
+### PostgreSQL configuration (Check for [more parameters here](https://artifacthub.io/packages/helm/bitnami/postgresql))
+
+| Name | Description | Value |
+| -------------------------- | --------------------------------------- | ---------------- |
+| `postgresql.auth.username` | Name for a custom user to create. | `example` |
+| `postgresql.auth.password` | Password for the custom user to create. | `secretpassword` |
+| `postgresql.auth.database` | Name for a custom database to create. | `penpot` |
+
+
+### Redis configuration (Check for [more parameters here](https://artifacthub.io/packages/helm/bitnami/redis))
+
+| Name | Description | Value |
+| -------------------- | ------------------------------------------ | ------- |
+| `redis.auth.enabled` | Whether to enable password authentication. | `false` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install example \
+ --set user=example \
+ --set password=example \
+ codechem/example
+```
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+helm install example -f values.yaml codechem/example
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+## Common configuration
+
+There are two types of configuration: options (properties that require some value) and flags (that just enable or disable something). The PENPOT_FLAGS environment variable will have an ordered list of strings using this format: `<enable|disable>-<flag-name>`.
+
+Regarding the flags, they are all listed in the [official docs](https://help.penpot.app/technical-guide/configuration), and here are the [additional flags](https://help.penpot.app/technical-guide/configuration/#other-flags) which are not mentioned in the chart configuration above, but you can still use them!
+
+## Authentication providers
+
+For configuration of the authentication with third-party auth providers you will need to configure penpot and set the correct callback of your penpot instance in the auth-provider configuration. The callback has the following format:
+
+```txt
+https://<your_domain>/api/auth/oauth/<oauth_provider>/callback
+```
+
+You will need to change `<your_domain>` and `<oauth_provider>` according to your setup. This is how it looks with the `gitlab.com` provider:
+
+```txt
+https://<your_domain>/api/auth/oauth/gitlab/callback
+```
+
+## Redis configuration
+
+The Redis configuration is very simple: just provide a valid Redis URI. Redis is used mainly for websocket notification coordination. Currently only unauthenticated connections are supported.
+
+## License
+
+Copyright © 2022 CodeChem
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/charts/penpot/charts/postgresql/.helmignore b/charts/penpot/charts/postgresql/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/penpot/charts/postgresql/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/penpot/charts/postgresql/Chart.lock b/charts/penpot/charts/postgresql/Chart.lock
new file mode 100644
index 0000000..912a3a8
--- /dev/null
+++ b/charts/penpot/charts/postgresql/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+ repository: https://charts.bitnami.com/bitnami
+ version: 2.2.2
+digest: sha256:49ca75cf23ba5eb7df4becef52580f98c8bd8194eb80368b9d7b875f6eefa8e5
+generated: "2022-12-14T19:37:46.129876178Z"
diff --git a/charts/penpot/charts/postgresql/Chart.yaml b/charts/penpot/charts/postgresql/Chart.yaml
new file mode 100644
index 0000000..6e6b7cc
--- /dev/null
+++ b/charts/penpot/charts/postgresql/Chart.yaml
@@ -0,0 +1,30 @@
+annotations:
+ category: Database
+apiVersion: v2
+appVersion: 15.1.0
+dependencies:
+- name: common
+ repository: https://charts.bitnami.com/bitnami
+ tags:
+ - bitnami-common
+ version: 2.x.x
+description: PostgreSQL (Postgres) is an open source object-relational database known
+ for reliability and data integrity. ACID-compliant, it supports foreign keys, joins,
+ views, triggers and stored procedures.
+home: https://github.com/bitnami/charts/tree/main/bitnami/postgresql
+icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png
+keywords:
+- postgresql
+- postgres
+- database
+- sql
+- replication
+- cluster
+maintainers:
+- name: Bitnami
+ url: https://github.com/bitnami/charts
+name: postgresql
+sources:
+- https://github.com/bitnami/containers/tree/main/bitnami/postgresql
+- https://www.postgresql.org/
+version: 12.1.9
diff --git a/charts/penpot/charts/postgresql/README.md b/charts/penpot/charts/postgresql/README.md
new file mode 100644
index 0000000..ed66b5d
--- /dev/null
+++ b/charts/penpot/charts/postgresql/README.md
@@ -0,0 +1,693 @@
+<!--- app-name: PostgreSQL -->
+
+# PostgreSQL packaged by Bitnami
+
+PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures.
+
+[Overview of PostgreSQL](http://www.postgresql.org)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```bash
+helm repo add my-repo https://charts.bitnami.com/bitnami
+helm install my-release my-repo/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+For HA, please see [this repo](https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha)
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+helm install my-release my-repo/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release.
+
+To delete the PVC's associated with `my-release`:
+
+```bash
+kubectl delete pvc -l release=my-release
+```
+
+> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it.
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` |
+| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` |
+| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` |
+| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` |
+| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). | `""` |
+| `global.postgresql.auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` |
+
+
+### Common parameters
+
+| Name | Description | Value |
+| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
+| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
+| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` |
+| `commonLabels` | Add labels to all the deployed resources | `{}` |
+| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` |
+
+
+### PostgreSQL common parameters
+
+| Name | Description | Value |
+| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
+| `image.registry` | PostgreSQL image registry | `docker.io` |
+| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
+| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.1.0-debian-11-r20` |
+| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify image pull secrets | `[]` |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` |
+| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided | `""` |
+| `auth.username` | Name for a custom user to create | `""` |
+| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided | `""` |
+| `auth.database` | Name for a custom database to create | `""` |
+| `auth.replicationUsername` | Name of the replication user | `repl_user` |
+| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided | `""` |
+| `auth.existingSecret`                    | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case.  | `""`                       |
+| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` |
+| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` |
+| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `replication-password` |
+| `auth.usePasswordFiles`                  | Mount credentials as files instead of using an environment variable                                                                                                                                                                                                                                                                            | `false`                    |
+| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `containerPorts.postgresql` | PostgreSQL container port | `5432` |
+| `audit.logHostname` | Log client hostnames | `false` |
+| `audit.logConnections` | Add client log-in operations to the log file | `false` |
+| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` |
+| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` |
+| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` |
+| `audit.clientMinMessages` | Message log level to share with the user | `error` |
+| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` |
+| `audit.logTimezone` | Timezone for the log timestamps | `""` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.server` | IP address or name of the LDAP server. | `""` |
+| `ldap.port` | Port number on the LDAP server to connect to | `""` |
+| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` |
+| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` |
+| `ldap.basedn` | Root DN to begin the search for the user in | `""` |
+| `ldap.binddn` | DN of user to bind to LDAP | `""` |
+| `ldap.bindpw` | Password for the user to bind to LDAP | `""` |
+| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` |
+| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` |
+| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` |
+| `ldap.tls.enabled`                       | Set to true to enable TLS encryption                                                                                                                                                                                                                                                                                                           | `false`                    |
+| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. | `""` |
+| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` |
+| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` |
+| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` |
+| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` |
+| `tls.enabled` | Enable TLS traffic support | `false` |
+| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` |
+| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` |
+| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` |
+| `tls.certFilename` | Certificate filename | `""` |
+| `tls.certKeyFilename` | Certificate key filename | `""` |
+| `tls.certCAFilename` | CA Certificate filename | `""` |
+| `tls.crlFilename` | File containing a Certificate Revocation List | `""` |
+
+
+### PostgreSQL Primary parameters
+
+| Name | Description | Value |
+| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
+| `primary.name` | Name of the primary database (eg primary, master, leader, ...) | `primary` |
+| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` |
+| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` |
+| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` |
+| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` |
+| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` |
+| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` |
+| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` |
+| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` |
+| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` |
+| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` |
+| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` |
+| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` |
+| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` |
+| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` |
+| `primary.standby.primaryPort` | The Port of replication primary in the other cluster | `""` |
+| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` |
+| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` |
+| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` |
+| `primary.command` | Override default container command (useful when using custom images) | `[]` |
+| `primary.args` | Override default container args (useful when using custom images) | `[]` |
+| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` |
+| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
+| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` |
+| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` |
+| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` |
+| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` |
+| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` |
+| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` |
+| `primary.podSecurityContext.enabled` | Enable security context | `true` |
+| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `primary.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` |
+| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` |
+| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` |
+| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` |
+| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` |
+| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` |
+| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` |
+| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.nodeAffinityPreset.key`             | PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set.                                          | `""`                  |
+| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` |
+| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` |
+| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` |
+| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` |
+| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` |
+| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` |
+| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` |
+| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` |
+| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` |
+| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` |
+| `primary.service.type` | Kubernetes Service type | `ClusterIP` |
+| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` |
+| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
+| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` |
+| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
+| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
+| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` |
+| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `primary.service.headless.annotations` | Additional custom annotations for headless PostgreSQL primary service | `{}` |
+| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` |
+| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` |
+| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
+| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
+| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` |
+| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
+| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `primary.persistence.annotations` | Annotations for the PVC | `{}` |
+| `primary.persistence.labels` | Labels for the PVC | `{}` |
+| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
+| `primary.persistence.dataSource` | Custom PVC data source | `{}` |
+
+
+### PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
+
+| Name | Description | Value |
+| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
+| `readReplicas.name` | Name of the read replicas database (eg secondary, slave, ...) | `read` |
+| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` |
+| `readReplicas.extendedConfiguration` | Extended PostgreSQL read only replicas configuration (appended to main or default configuration) | `""` |
+| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` |
+| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` |
+| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` |
+| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` |
+| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` |
+| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` |
+| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
+| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` |
+| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` |
+| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `readReplicas.lifecycleHooks` | for the PostgreSQL read only container to automate configuration before or after startup | `{}` |
+| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` |
+| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` |
+| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` |
+| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` |
+| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` |
+| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
+| `readReplicas.hostIPC`                            | Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only)                                            | `false`               |
+| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` |
+| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` |
+| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.nodeAffinityPreset.key`             | PostgreSQL read only node label key to match. Ignored if `primary.affinity` is set.                                        | `""`                  |
+| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` |
+| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` |
+| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` |
+| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` |
+| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` |
+| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` |
+| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` |
+| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` |
+| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` |
+| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
+| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` |
+| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
+| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
+| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` |
+| `readReplicas.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `readReplicas.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `readReplicas.service.headless.annotations` | Additional custom annotations for headless PostgreSQL read only service | `{}` |
+| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` |
+| `readReplicas.persistence.existingClaim` | Name of an existing PVC to use | `""` |
+| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
+| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
+| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` |
+| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
+| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` |
+| `readReplicas.persistence.labels` | Labels for the PVC | `{}` |
+| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
+| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` |
+
+
+### NetworkPolicy parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `networkPolicy.enabled` | Enable network policies | `false` |
+| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` |
+| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` |
+| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector`      | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s).        | `{}`    |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector`            | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s).                   | `{}`    |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s).     | `{}`    |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector`       | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s).                | `{}`    |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `{}` |
+| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` |
+| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` |
+
+
+### Volume Permissions parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r69` |
+| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
+| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
+| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
+
+
+### Other Parameters
+
+| Name | Description | Value |
+| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` |
+| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
+| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` |
+| `rbac.rules` | Custom RBAC rules to set | `[]` |
+| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` |
+
+
+### Metrics Parameters
+
+| Name | Description | Value |
+| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------------- |
+| `metrics.enabled` | Start a prometheus exporter | `false` |
+| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` |
+| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` |
+| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.11.1-debian-11-r46` |
+| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` |
+| `metrics.customMetrics` | Define additional custom metrics | `{}` |
+| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` |
+| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` |
+| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` |
+| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` |
+| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` |
+| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` |
+| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` |
+| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
+| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` |
+| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` |
+| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
+| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
+| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release \
+    --set auth.postgresPassword=secretpassword \
+ my-repo/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+> **Warning** Setting a password will be ignored on new installation in case when previous PostgreSQL release was deleted through the helm command. In that case, old PVC will have an old password, and setting it through helm won't take effect. Deleting persistent volumes (PVs) will solve the issue. Refer to [issue 2061](https://github.com/bitnami/charts/issues/2061) for more details
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+helm install my-release -f values.yaml my-repo/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Customizing primary and read replica services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for both the primary and read types individually. This allows you to override the values in the top level service object so that the primary and read can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the primary and read to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the primary.service or readReplicas.service objects will take precedence over the top level service object.
+
+### Use a different PostgreSQL version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/).
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This helm chart also supports customizing the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration`/`readReplicas.extendedConfiguration` parameters as a string. Alternatively, to replace the entire default configuration use `primary.configuration`.
+
+You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string.
+
+In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+
+For example:
+
+- First, create the secret with the certificate files:
+
+ ```console
+ kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
+ ```
+
+- Then, use the following parameters:
+
+ ```console
+ volumePermissions.enabled=true
+ tls.enabled=true
+ tls.certificatesSecret="certificates-tls-secret"
+ tls.certFilename="cert.crt"
+ tls.certKeyFilename="cert.key"
+ ```
+
+ > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL primary
+primary:
+ sidecars:
+ - name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+# For the PostgreSQL replicas
+readReplicas:
+ sidecars:
+ - name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+```
+
+### Metrics
+
+The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies
+
+```
+ +--------------+
+ | |
+ +------------+ Chart 1 +-----------+
+ | | | |
+ | --------+------+ |
+ | | |
+ | | |
+ | | |
+ | | |
+ v v v
++-------+------+ +--------+------+ +--------+------+
+| | | | | |
+| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 |
+| | | | | |
++--------------+ +---------------+ +---------------+
+```
+
+The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters:
+
+```
+postgresql.auth.username=testuser
+subchart1.postgresql.auth.username=testuser
+subchart2.postgresql.auth.username=testuser
+postgresql.auth.password=testpass
+subchart1.postgresql.auth.password=testpass
+subchart2.postgresql.auth.password=testpass
+postgresql.auth.database=testdb
+subchart1.postgresql.auth.database=testdb
+subchart2.postgresql.auth.database=testdb
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.auth.username=testuser
+global.postgresql.auth.password=testpass
+global.postgresql.auth.database=testdb
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, synchronization to standby nodes will fail for all commits; details can be found in the [code present in the container repository](https://github.com/bitnami/containers/tree/main/bitnami/postgresql). If you need to keep that data, please convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```bash
+kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift up to 4.10, let OpenShift set the volume permissions, security context, runAsUser and fsGroup automatically, and disable the predefined settings of the helm chart: primary.securityContext.enabled=false,primary.containerSecurityContext.enabled=false,volumePermissions.enabled=false,shmVolume.enabled=false
+- For OpenShift 4.11 and higher, let OpenShift set the runAsUser and fsGroup automatically. Configure the pod and container security context to restrictive defaults and disable the volume permissions setup: primary.
+  podSecurityContext.fsGroup=null,primary.podSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.runAsUser=null,primary.containerSecurityContext.allowPrivilegeEscalation=false,primary.containerSecurityContext.runAsNonRoot=true,primary.containerSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.capabilities.drop=['ALL'],volumePermissions.enabled=false,shmVolume.enabled=false
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 12.0.0
+
+This major version changes the default PostgreSQL image from 14.x to 15.x. Follow the [official instructions](https://www.postgresql.org/docs/15/upgrading.html) to upgrade to 15.x.
+
+### To any previous version
+
+Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/).
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/charts/penpot/charts/postgresql/charts/common/.helmignore b/charts/penpot/charts/postgresql/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/penpot/charts/postgresql/charts/common/Chart.yaml b/charts/penpot/charts/postgresql/charts/common/Chart.yaml
new file mode 100644
index 0000000..f9ba944
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+ category: Infrastructure
+apiVersion: v2
+appVersion: 2.2.2
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+ This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/main/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+ url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 2.2.2
diff --git a/charts/penpot/charts/postgresql/charts/common/README.md b/charts/penpot/charts/postgresql/charts/common/README.md
new file mode 100644
index 0000000..ec43a5f
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/README.md
@@ -0,0 +1,351 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+ - name: common
+ version: 1.x.x
+ repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.names.fullname" . }}
+data:
+ myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following table lists the helpers available in the library which are scoped in different sections.
+
+### Affinities
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------|------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
+| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
+| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` |
+
+### Capabilities
+
+| Helper identifier | Description | Expected Input |
+|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context |
+| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
+| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
+| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context |
+| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context |
+| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context |
+| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context |
+| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context |
+| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context |
+
+### Errors
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
+| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
+
+### Images
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------|
+| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` |
+
+### Ingress
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context |
+| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context |
+| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` |
+
+### Labels
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------|-----------------------------------------------------------------------------|-------------------|
+| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------|-----------------------------------------------------------------------|-------------------|
+| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
+| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartName fields are optional.  |
+| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value that should be rendered as a template, context frequently is the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. |
+| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. |
+| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. |
+| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. |
+| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. |
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+|------------------------------|----------------------------------|------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+ type: string
+ description: Docker registry where the image is located
+ example: docker.io
+
+repository:
+ type: string
+ description: Repository and image name
+ example: bitnami/nginx
+
+tag:
+ type: string
+ description: image tag
+ example: 1.16.1-debian-10-r63
+
+pullPolicy:
+ type: string
+ description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+ type: array
+ items:
+ type: string
+ description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+ type: boolean
+ description: Set to true if you would like to see extra information on logs
+ example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+ type: boolean
+ description: Whether enable persistence.
+ example: true
+
+storageClass:
+ type: string
+ description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+ example: "-"
+
+accessMode:
+ type: string
+ description: Access mode for the Persistent Volume Storage.
+ example: ReadWriteOnce
+
+size:
+ type: string
+ description: Size the Persistent Volume Storage.
+ example: 8Gi
+
+path:
+ type: string
+ description: Path to be persisted.
+ example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+ type: string
+ description: Name of the existing secret.
+ example: mySecret
+keyMapping:
+ description: Mapping between the expected key name and the name of the key in the existing secret.
+ type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+# password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+ password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+ env:
+ - name: PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+ key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+ password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+ export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+ export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_affinities.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..81902a6
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_affinities.tpl
@@ -0,0 +1,106 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" . -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_capabilities.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_capabilities.tpl
new file mode 100644
index 0000000..9d9b760
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_capabilities.tpl
@@ -0,0 +1,154 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+ {{- if .Values.global.kubeVersion }}
+ {{- .Values.global.kubeVersion -}}
+ {{- else }}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+ {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_errors.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+ - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+ - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+ {{- $validationErrors := join "" .validationErrors -}}
+ {{- if and $validationErrors .context.Release.IsUpgrade -}}
+ {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+ {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+ {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+ {{- $errorString = print $errorString "\n%s" -}}
+ {{- printf $errorString $validationErrors | fail -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_images.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_images.tpl
new file mode 100644
index 0000000..46c659e
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_images.tpl
@@ -0,0 +1,76 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+ {{- if .global.imageRegistry }}
+ {{- $registryName = .global.imageRegistry -}}
+ {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+ {{- $pullSecrets := list }}
+
+ {{- if .global }}
+ {{- range .global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+ {{- $pullSecrets := list }}
+ {{- $context := .context }}
+
+ {{- if $context.Values.global }}
+ {{- range $context.Values.global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_ingress.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_ingress.tpl
new file mode 100644
index 0000000..831da9c
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_ingress.tpl
@@ -0,0 +1,68 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+ - serviceName - String. Name of an existing service backend
+ - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+ name: {{ .serviceName }}
+ port:
+ {{- if typeIs "string" .servicePort }}
+ name: {{ .servicePort }}
+ {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+ number: {{ .servicePort | int }}
+ {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_labels.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_labels.tpl
new file mode 100644
index 0000000..252066c
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_labels.tpl
@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_names.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_names.tpl
new file mode 100644
index 0000000..617a234
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_names.tpl
@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_secrets.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_secrets.tpl
new file mode 100644
index 0000000..a1708b2
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_secrets.tpl
@@ -0,0 +1,165 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+ {{- if not (typeIs "string" .existingSecret) -}}
+ {{- if .existingSecret.keyMapping -}}
+ {{- $key = index .existingSecret.keyMapping $.key -}}
+ {{- end -}}
+ {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - length - int - Optional - Length of the generated random password.
+ - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+ - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+ - context - Context - Required - Parent context.
+
+The order in which this function returns a secret password:
+ 1. Already existing 'Secret' resource
+ (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+ 2. Password provided via the values.yaml
+ (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+ 3. Randomly generated secret password
+ (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+ {{- if hasKey $secretData .key }}
+ {{- $password = index $secretData .key | quote }}
+ {{- else }}
+ {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+ {{- end -}}
+{{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+ {{- if .context.Values.enabled }}
+ {{- $subchart = $chartName }}
+ {{- end -}}
+
+ {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+ {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+ {{- $passwordValidationErrors := list $requiredPasswordError -}}
+ {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+ {{- if .strong }}
+ {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+ {{- $password = randAscii $passwordLength }}
+ {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+ {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+ {{- else }}
+ {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+ {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - defaultValue - String - Required - The default value to use if the key is not found in the existing secret.
+ - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+ {{- $value = index $secretData .key -}}
+{{- else -}}
+ {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_storage.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..60e2a84
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_storage.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+ {{- if .global.storageClass -}}
+ {{- $storageClass = .global.storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+ {{- if (eq "-" $storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" $storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_tplvalues.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..2db1668
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_utils.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..b1ead50
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_utils.tpl
@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+ {{- $fieldNameSplit := splitList "-" .field -}}
+ {{- $upperCaseFieldNameSplit := list -}}
+
+ {{- range $fieldNameSplit -}}
+ {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+ {{- end -}}
+
+ {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+ {{- if not $latestObj -}}
+ {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+ {{- end -}}
+ {{- $value = ( index $latestObj . ) -}}
+ {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+ {{- if $value -}}
+ {{- $key = . }}
+ {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/_warnings.tpl b/charts/penpot/charts/postgresql/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..ae10fa4
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/_warnings.tpl
@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/validations/_cassandra.tpl b/charts/penpot/charts/postgresql/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..ded1ae3
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+ {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+ {{- $enabled := include "common.cassandra.values.enabled" . -}}
+ {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+ {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.dbUser.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.cassandra.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+ {{- if .subchart -}}
+ cassandra.dbUser
+ {{- else -}}
+ dbUser
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/validations/_mariadb.tpl b/charts/penpot/charts/postgresql/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 0000000..b6906ff
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+ {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mariadb.values.enabled" . -}}
+ {{- $architecture := include "common.mariadb.values.architecture" . -}}
+ {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mariadb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mariadb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/validations/_mongodb.tpl b/charts/penpot/charts/postgresql/charts/common/templates/validations/_mongodb.tpl
new file mode 100644
index 0000000..f820ec1
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/validations/_mongodb.tpl
@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret"
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+ {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mongodb.values.enabled" . -}}
+ {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+ {{- $architecture := include "common.mongodb.values.architecture" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+ {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+ {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+ {{- if and $valueUsername $valueDatabase -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replicaset") -}}
+ {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mongodb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mongodb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/validations/_mysql.tpl b/charts/penpot/charts/postgresql/charts/common/templates/validations/_mysql.tpl
new file mode 100644
index 0000000..74472a0
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/validations/_mysql.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+ {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mysql.values.enabled" . -}}
+ {{- $architecture := include "common.mysql.values.architecture" . -}}
+ {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mysql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+ {{- if .subchart -}}
+ mysql.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/validations/_postgresql.tpl b/charts/penpot/charts/postgresql/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+ {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+ {{- $enabled := include "common.postgresql.values.enabled" . -}}
+ {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+ {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+ {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+ {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+ {{- if (eq $enabledReplication "true") -}}
+ {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+ - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+ {{- if .context.Values.global -}}
+ {{- if .context.Values.global.postgresql -}}
+ {{- index .context.Values.global.postgresql .key | quote -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+ {{- if .subchart -}}
+ {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+ {{- else -}}
+ {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+ {{- if not $globalValue -}}
+ {{- if .subchart -}}
+ postgresql.postgresqlPassword
+ {{- else -}}
+ postgresqlPassword
+ {{- end -}}
+ {{- else -}}
+ global.postgresql.postgresqlPassword
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+ {{- else -}}
+ {{- printf "%v" .context.Values.replication.enabled -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+ {{- if .subchart -}}
+ postgresql.replication.password
+ {{- else -}}
+ replication.password
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/validations/_redis.tpl b/charts/penpot/charts/postgresql/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..dcccfc1
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+ {{- $enabled := include "common.redis.values.enabled" . -}}
+ {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+ {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+ {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+ {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+ {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+ {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+ {{- if and (or (not $existingSecretValue) (eq $existingSecretValue "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+ {{- if eq $useAuth "true" -}}
+ {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.redis.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+ {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+ {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+ {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+ {{- if $standarizedAuthValues -}}
+ {{- true -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/templates/validations/_validations.tpl b/charts/penpot/charts/postgresql/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+ {{- range .required -}}
+ {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+ - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+ {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+ {{- if not $value -}}
+ {{- $varname := "my-value" -}}
+ {{- $getCurrentValue := "" -}}
+ {{- if and .secret .field -}}
+ {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+ {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+ {{- end -}}
+ {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/charts/common/values.yaml b/charts/penpot/charts/postgresql/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/charts/penpot/charts/postgresql/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/charts/penpot/charts/postgresql/templates/NOTES.txt b/charts/penpot/charts/postgresql/templates/NOTES.txt
new file mode 100644
index 0000000..e0474d4
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/NOTES.txt
@@ -0,0 +1,89 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+ kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+ kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash
+
+In order to replicate the container startup scripts execute this command:
+
+ /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh
+
+{{- else }}
+
+PostgreSQL can be accessed via port {{ include "postgresql.service.port" . }} on the following DNS names from within your cluster:
+
+ {{ include "postgresql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
+
+{{- if eq .Values.architecture "replication" }}
+
+ {{ include "postgresql.readReplica.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
+
+{{- end }}
+
+{{- $customUser := include "postgresql.username" . }}
+{{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }}
+
+To get the password for "postgres" run:
+
+ export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.postgres-password}" | base64 -d)
+
+To get the password for "{{ $customUser }}" run:
+
+ export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.password}" | base64 -d)
+
+{{- else }}
+
+To get the password for "{{ default "postgres" $customUser }}" run:
+
+ export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 -d)
+
+{{- end }}
+
+To connect to your database run the following command:
+
+ kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \
+ --command -- psql --host {{ include "postgresql.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }}
+
+ > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }} does not exist"
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.primary.service.type }}
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.primary.fullname" . }})
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }}
+
+{{- else if contains "LoadBalancer" .Values.primary.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.primary.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }}
+
+{{- else if contains "ClusterIP" .Values.primary.service.type }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.primary.fullname" . }} {{ include "postgresql.service.port" . }}:{{ include "postgresql.service.port" . }} &
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }}
+
+{{- end }}
+{{- end }}
+
+{{- include "postgresql.validateValues" . -}}
+{{- include "common.warnings.rollingTag" .Values.image -}}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
diff --git a/charts/penpot/charts/postgresql/templates/_helpers.tpl b/charts/penpot/charts/postgresql/templates/_helpers.tpl
new file mode 100644
index 0000000..8189380
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/_helpers.tpl
@@ -0,0 +1,399 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL Primary objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.primary.fullname" -}}
+{{- if eq .Values.architecture "replication" }}
+ {{- printf "%s-%s" (include "common.names.fullname" .) .Values.primary.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+ {{- include "common.names.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL read-only replicas objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.readReplica.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) .Values.readReplicas.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL primary headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.primary.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.primary.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL read-only replicas headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.readReplica.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.readReplica.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL image name
+*/}}
+{{- define "postgresql.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL metrics image name
+*/}}
+{{- define "postgresql.metrics.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "postgresql.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "postgresql.imagePullSecrets" -}}
+{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the name for a custom user to create
+*/}}
+{{- define "postgresql.username" -}}
+{{- if .Values.global.postgresql.auth.username }}
+ {{- .Values.global.postgresql.auth.username -}}
+{{- else -}}
+ {{- .Values.auth.username -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name for a custom database to create
+*/}}
+{{- define "postgresql.database" -}}
+{{- if .Values.global.postgresql.auth.database }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.database $) -}}
+{{- else if .Values.auth.database -}}
+ {{- printf "%s" (tpl .Values.auth.database $) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "postgresql.secretName" -}}
+{{- if .Values.global.postgresql.auth.existingSecret }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}}
+{{- else if .Values.auth.existingSecret -}}
+ {{- printf "%s" (tpl .Values.auth.existingSecret $) -}}
+{{- else -}}
+ {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the replication-password key.
+*/}}
+{{- define "postgresql.replicationPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }}
+ {{- if .Values.global.postgresql.auth.secretKeys.replicationPasswordKey }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.replicationPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.replicationPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.replicationPasswordKey $) -}}
+ {{- else -}}
+ {{- "replication-password" -}}
+ {{- end -}}
+{{- else -}}
+ {{- "replication-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the admin-password key.
+*/}}
+{{- define "postgresql.adminPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }}
+ {{- if .Values.global.postgresql.auth.secretKeys.adminPasswordKey }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.adminPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.adminPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.adminPasswordKey $) -}}
+ {{- end -}}
+{{- else -}}
+ {{- "postgres-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the user-password key.
+*/}}
+{{- define "postgresql.userPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }}
+ {{- if or (empty (include "postgresql.username" .)) (eq (include "postgresql.username" .) "postgres") }}
+ {{- printf "%s" (include "postgresql.adminPasswordKey" .) -}}
+ {{- else -}}
+ {{- if .Values.global.postgresql.auth.secretKeys.userPasswordKey }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.userPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.userPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.userPasswordKey $) -}}
+ {{- end -}}
+ {{- end -}}
+{{- else -}}
+ {{- ternary "password" "postgres-password" (and (not (empty (include "postgresql.username" .))) (ne (include "postgresql.username" .) "postgres")) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret object should be created
+*/}}
+{{- define "postgresql.createSecret" -}}
+{{- if not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL service port
+*/}}
+{{- define "postgresql.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql }}
+ {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+ {{- .Values.primary.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL service port
+*/}}
+{{- define "postgresql.readReplica.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql }}
+ {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+ {{- .Values.readReplicas.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary configuration ConfigMap name.
+*/}}
+{{- define "postgresql.primary.configmapName" -}}
+{{- if .Values.primary.existingConfigmap -}}
+ {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}}
+{{- else -}}
+ {{- printf "%s-configuration" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the configuration
+*/}}
+{{- define "postgresql.primary.createConfigmap" -}}
+{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) }}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.primary.extendedConfigmapName" -}}
+{{- if .Values.primary.existingExtendedConfigmap -}}
+ {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}}
+{{- else -}}
+ {{- printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL read replica extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.readReplicas.extendedConfigmapName" -}}
+ {{- printf "%s-extended-configuration" (include "postgresql.readReplica.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the extended configuration
+*/}}
+{{- define "postgresql.primary.createExtendedConfigmap" -}}
+{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) }}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL read replica with the extended configuration
+*/}}
+{{- define "postgresql.readReplicas.createExtendedConfigmap" -}}
+{{- if .Values.readReplicas.extendedConfiguration }}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "postgresql.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap should be mounted with PostgreSQL configuration
+*/}}
+{{- define "postgresql.mountConfigurationCM" -}}
+{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts ConfigMap name.
+*/}}
+{{- define "postgresql.initdb.scriptsCM" -}}
+{{- if .Values.primary.initdb.scriptsConfigMap -}}
+ {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}}
+{{- else -}}
+ {{- printf "%s-init-scripts" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{/*
+Return true if TLS is enabled for LDAP connection
+*/}}
+{{- define "postgresql.ldap.tls.enabled" -}}
+{{- if and (kindIs "string" .Values.ldap.tls) (not (empty .Values.ldap.tls)) }}
+ {{- true -}}
+{{- else if and (kindIs "map" .Values.ldap.tls) .Values.ldap.tls.enabled }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the readiness probe command
+*/}}
+{{- define "postgresql.readinessProbeCommand" -}}
+{{- $customUser := include "postgresql.username" . }}
+- |
+{{- if (include "postgresql.database" .) }}
+ exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- else }}
+ exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- end }}
+{{- if contains "bitnami/" .Values.image.repository }}
+ [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "postgresql.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}}
+{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap
+*/}}
+{{- define "postgresql.validateValues.ldapConfigurationMethod" -}}
+{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }}
+postgresql: ldap.url, ldap.server
+ You cannot set both `ldap.url` and `ldap.server` at the same time.
+ Please provide a unique way to configure LDAP.
+ More info at https://www.postgresql.org/docs/current/auth-ldap.html
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If PSP is enabled RBAC should be enabled too
+*/}}
+{{- define "postgresql.validateValues.psp" -}}
+{{- if and .Values.psp.create (not .Values.rbac.create) }}
+postgresql: psp.create, rbac.create
+ RBAC should be enabled if PSP is enabled in order for PSP to work.
+ More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "postgresql.tlsCert" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}}
+{{- else -}}
+  {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "postgresql.tlsCertKey" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}}
+{{- else -}}
+{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.tlsCACert" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}}
+{{- else -}}
+ {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CRL file.
+*/}}
+{{- define "postgresql.tlsCRL" -}}
+{{- if .Values.tls.crlFilename -}}
+{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "postgresql.createTlsSecret" -}}
+{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.tlsSecretName" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- else -}}
+ {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/templates/extra-list.yaml b/charts/penpot/charts/postgresql/templates/extra-list.yaml
new file mode 100644
index 0000000..9ac65f9
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/networkpolicy-egress.yaml b/charts/penpot/charts/postgresql/templates/networkpolicy-egress.yaml
new file mode 100644
index 0000000..e862147
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/networkpolicy-egress.yaml
@@ -0,0 +1,32 @@
+{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.egressRules.denyConnectionsToExternal .Values.networkPolicy.egressRules.customRules) }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-egress" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Egress
+ egress:
+ {{- if .Values.networkPolicy.egressRules.denyConnectionsToExternal }}
+ - ports:
+ - port: 53
+ protocol: UDP
+ - port: 53
+ protocol: TCP
+ - to:
+ - namespaceSelector: {}
+ {{- end }}
+ {{- if .Values.networkPolicy.egressRules.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/configmap.yaml b/charts/penpot/charts/postgresql/templates/primary/configmap.yaml
new file mode 100644
index 0000000..d654a22
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/configmap.yaml
@@ -0,0 +1,24 @@
+{{- if (include "postgresql.primary.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-configuration" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ {{- if .Values.primary.configuration }}
+ postgresql.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.pgHbaConfiguration }}
+ pg_hba.conf: |
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/extended-configmap.yaml b/charts/penpot/charts/postgresql/templates/primary/extended-configmap.yaml
new file mode 100644
index 0000000..d129bd3
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/extended-configmap.yaml
@@ -0,0 +1,18 @@
+{{- if (include "postgresql.primary.createExtendedConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ override.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/initialization-configmap.yaml b/charts/penpot/charts/postgresql/templates/primary/initialization-configmap.yaml
new file mode 100644
index 0000000..d3d26cb
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/initialization-configmap.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-init-scripts" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) | nindent 2 }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/metrics-configmap.yaml b/charts/penpot/charts/postgresql/templates/primary/metrics-configmap.yaml
new file mode 100644
index 0000000..8ad2f35
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/metrics-configmap.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/metrics-svc.yaml b/charts/penpot/charts/postgresql/templates/primary/metrics-svc.yaml
new file mode 100644
index 0000000..75a1b81
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/metrics-svc.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.metrics.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+ {{- if .Values.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.metrics.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ port: {{ .Values.metrics.service.ports.metrics }}
+ targetPort: http-metrics
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/networkpolicy.yaml b/charts/penpot/charts/postgresql/templates/primary/networkpolicy.yaml
new file mode 100644
index 0000000..ce0052d
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/networkpolicy.yaml
@@ -0,0 +1,57 @@
+{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.metrics.enabled .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled) }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-ingress" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: primary
+ ingress:
+ {{- if and .Values.metrics.enabled .Values.networkPolicy.metrics.enabled (or .Values.networkPolicy.metrics.namespaceSelector .Values.networkPolicy.metrics.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.metrics.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.metrics.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (eq .Values.architecture "replication") }}
+ - from:
+ - podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+ app.kubernetes.io/component: read
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/servicemonitor.yaml b/charts/penpot/charts/postgresql/templates/primary/servicemonitor.yaml
new file mode 100644
index 0000000..c4a19fe
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "postgresql.primary.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if .Values.metrics.serviceMonitor.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.metrics.serviceMonitor.jobLabel }}
+ jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ {{- if .Values.metrics.serviceMonitor.selector }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+ {{- end }}
+ app.kubernetes.io/component: metrics
+ endpoints:
+ - port: http-metrics
+ {{- if .Values.metrics.serviceMonitor.interval }}
+ interval: {{ .Values.metrics.serviceMonitor.interval }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+ scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.relabelings }}
+ relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.honorLabels }}
+ honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+ {{- end }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/statefulset.yaml b/charts/penpot/charts/postgresql/templates/primary/statefulset.yaml
new file mode 100644
index 0000000..653138c
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/statefulset.yaml
@@ -0,0 +1,634 @@
+{{- $customUser := include "postgresql.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ include "postgresql.primary.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: 1
+ serviceName: {{ include "postgresql.primary.svc.headless" . }}
+ {{- if .Values.primary.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: primary
+ template:
+ metadata:
+ name: {{ include "postgresql.primary.fullname" . }}
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- if (include "postgresql.primary.createConfigmap" .) }}
+ checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if (include "postgresql.primary.createExtendedConfigmap" .) }}
+ checksum/extended-configuration: {{ include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.primary.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.primary.extraPodSpec }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "postgresql.serviceAccountName" . }}
+ {{- include "postgresql.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.primary.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.primary.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.priorityClassName }}
+ priorityClassName: {{ .Values.primary.priorityClassName }}
+ {{- end }}
+ {{- if .Values.primary.schedulerName }}
+ schedulerName: {{ .Values.primary.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.primary.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if .Values.primary.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ hostNetwork: {{ .Values.primary.hostNetwork }}
+ hostIPC: {{ .Values.primary.hostIPC }}
+ initContainers:
+ {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+ - name: copy-certs
+ image: {{ include "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.primary.resources }}
+ resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+ {{- end }}
+ # We don't require a privileged container in this case
+ {{- if .Values.primary.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ chmod 600 {{ include "postgresql.tlsCertKey" . }}
+ volumeMounts:
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }}
+ - name: init-chmod-data
+ image: {{ include "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ {{- if .Values.primary.persistence.enabled }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }}
+ {{- else }}
+ chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }}
+ {{- end }}
+ mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+ chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+ find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+ {{- else }}
+ xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ chmod -R 777 /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+ {{- else }}
+ chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+ {{- end }}
+ chmod 600 {{ include "postgresql.tlsCertKey" . }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.primary.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.primary.persistence.mountPath }}
+ {{- if .Values.primary.persistence.subPath }}
+ subPath: {{ .Values.primary.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.initContainers }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.initContainers "context" $ ) | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: postgresql
+ image: {{ include "postgresql.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.primary.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.primary.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.primary.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: POSTGRESQL_PORT_NUMBER
+ value: {{ .Values.containerPorts.postgresql | quote }}
+ - name: POSTGRESQL_VOLUME_DIR
+ value: {{ .Values.primary.persistence.mountPath | quote }}
+ {{- if .Values.primary.persistence.mountPath }}
+ - name: PGDATA
+ value: {{ .Values.postgresqlDataDir | quote }}
+ {{- end }}
+ # Authentication
+ {{- if and (not (empty $customUser)) (ne $customUser "postgres") }}
+ - name: POSTGRES_USER
+ value: {{ $customUser | quote }}
+ {{- if .Values.auth.enablePostgresUser }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_POSTGRES_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgres-password"
+ {{- else }}
+ - name: POSTGRES_POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.adminPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+ {{- else }}
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.userPasswordKey" . }}
+ {{- end }}
+ {{- if (include "postgresql.database" .) }}
+ - name: POSTGRES_DB
+ value: {{ (include "postgresql.database" .) | quote }}
+ {{- end }}
+ # Replication
+ {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }}
+ - name: POSTGRES_REPLICATION_MODE
+ value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }}
+ - name: POSTGRES_REPLICATION_USER
+ value: {{ .Values.auth.replicationUsername | quote }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_REPLICATION_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/replication-password"
+ {{- else }}
+ - name: POSTGRES_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.replicationPasswordKey" . }}
+ {{- end }}
+ {{- if not (eq .Values.replication.synchronousCommit "off") }}
+ - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE
+ value: {{ .Values.replication.synchronousCommit | quote }}
+ - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS
+ value: {{ .Values.replication.numSynchronousReplicas | quote }}
+ {{- end }}
+ - name: POSTGRES_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ {{- end }}
+ # Initdb
+ {{- if .Values.primary.initdb.args }}
+ - name: POSTGRES_INITDB_ARGS
+ value: {{ .Values.primary.initdb.args | quote }}
+ {{- end }}
+ {{- if .Values.primary.initdb.postgresqlWalDir }}
+ - name: POSTGRES_INITDB_WALDIR
+ value: {{ .Values.primary.initdb.postgresqlWalDir | quote }}
+ {{- end }}
+ {{- if .Values.primary.initdb.user }}
+ - name: POSTGRESQL_INITSCRIPTS_USERNAME
+ value: {{ .Values.primary.initdb.user }}
+ {{- end }}
+ {{- if .Values.primary.initdb.password }}
+ - name: POSTGRESQL_INITSCRIPTS_PASSWORD
+ value: {{ .Values.primary.initdb.password | quote }}
+ {{- end }}
+ # Standby
+ {{- if .Values.primary.standby.enabled }}
+ - name: POSTGRES_MASTER_HOST
+ value: {{ .Values.primary.standby.primaryHost }}
+ - name: POSTGRES_MASTER_PORT_NUMBER
+ value: {{ .Values.primary.standby.primaryPort | quote }}
+ {{- end }}
+ # LDAP
+ - name: POSTGRESQL_ENABLE_LDAP
+ value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }}
+ {{- if .Values.ldap.enabled }}
+ {{- if or .Values.ldap.url .Values.ldap.uri }}
+ - name: POSTGRESQL_LDAP_URL
+ value: {{ coalesce .Values.ldap.url .Values.ldap.uri }}
+ {{- else }}
+ - name: POSTGRESQL_LDAP_SERVER
+ value: {{ .Values.ldap.server }}
+ - name: POSTGRESQL_LDAP_PORT
+ value: {{ .Values.ldap.port | quote }}
+ - name: POSTGRESQL_LDAP_SCHEME
+ value: {{ .Values.ldap.scheme }}
+ {{- if (include "postgresql.ldap.tls.enabled" .) }}
+ - name: POSTGRESQL_LDAP_TLS
+ value: "1"
+ {{- end }}
+ - name: POSTGRESQL_LDAP_PREFIX
+ value: {{ .Values.ldap.prefix | quote }}
+ - name: POSTGRESQL_LDAP_SUFFIX
+ value: {{ .Values.ldap.suffix | quote }}
+ - name: POSTGRESQL_LDAP_BASE_DN
+ value: {{ coalesce .Values.ldap.baseDN .Values.ldap.basedn }}
+ - name: POSTGRESQL_LDAP_BIND_DN
+ value: {{ coalesce .Values.ldap.bindDN .Values.ldap.binddn}}
+ {{- if or (not (empty .Values.ldap.bind_password)) (not (empty .Values.ldap.bindpw)) }}
+ - name: POSTGRESQL_LDAP_BIND_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: ldap-password
+ {{- end }}
+ - name: POSTGRESQL_LDAP_SEARCH_ATTR
+ value: {{ coalesce .Values.ldap.search_attr .Values.ldap.searchAttribute }}
+ - name: POSTGRESQL_LDAP_SEARCH_FILTER
+ value: {{ coalesce .Values.ldap.search_filter .Values.ldap.searchFilter }}
+ {{- end }}
+ {{- end }}
+ # TLS
+ - name: POSTGRESQL_ENABLE_TLS
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+ value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+ - name: POSTGRESQL_TLS_CERT_FILE
+ value: {{ include "postgresql.tlsCert" . }}
+ - name: POSTGRESQL_TLS_KEY_FILE
+ value: {{ include "postgresql.tlsCertKey" . }}
+ {{- if .Values.tls.certCAFilename }}
+ - name: POSTGRESQL_TLS_CA_FILE
+ value: {{ include "postgresql.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.tls.crlFilename }}
+ - name: POSTGRESQL_TLS_CRL_FILE
+ value: {{ include "postgresql.tlsCRL" . }}
+ {{- end }}
+ {{- end }}
+ # Audit
+ - name: POSTGRESQL_LOG_HOSTNAME
+ value: {{ .Values.audit.logHostname | quote }}
+ - name: POSTGRESQL_LOG_CONNECTIONS
+ value: {{ .Values.audit.logConnections | quote }}
+ - name: POSTGRESQL_LOG_DISCONNECTIONS
+ value: {{ .Values.audit.logDisconnections | quote }}
+ {{- if .Values.audit.logLinePrefix }}
+ - name: POSTGRESQL_LOG_LINE_PREFIX
+ value: {{ .Values.audit.logLinePrefix | quote }}
+ {{- end }}
+ {{- if .Values.audit.logTimezone }}
+ - name: POSTGRESQL_LOG_TIMEZONE
+ value: {{ .Values.audit.logTimezone | quote }}
+ {{- end }}
+ {{- if .Values.audit.pgAuditLog }}
+ - name: POSTGRESQL_PGAUDIT_LOG
+ value: {{ .Values.audit.pgAuditLog | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+ value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+ # Others
+ - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+ value: {{ .Values.audit.clientMinMessages | quote }}
+ - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+ value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+ {{- if .Values.primary.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.primary.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.primary.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.primary.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.primary.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ containerPort: {{ .Values.containerPorts.postgresql }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.primary.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - -e
+ {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.resources }}
+ resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.primary.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d/
+ {{- end }}
+ {{- if .Values.primary.initdb.scriptsSecret }}
+ - name: custom-init-scripts-secret
+ mountPath: /docker-entrypoint-initdb.d/secret
+ {{- end }}
+ {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+ - name: postgresql-extended-config
+ mountPath: /bitnami/postgresql/conf/conf.d/
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.primary.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.primary.persistence.mountPath }}
+ {{- if .Values.primary.persistence.subPath }}
+ subPath: {{ .Values.primary.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ - name: postgresql-config
+ mountPath: /bitnami/postgresql/conf
+ {{- end }}
+ {{- if .Values.primary.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ include "postgresql.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.customMetrics }}
+ args: ["--extend.query-path", "/conf/custom-metrics.yaml"]
+ {{- end }}
+ env:
+ {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }}
+ - name: DATA_SOURCE_URI
+ value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.service.port" .)) $database }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: DATA_SOURCE_PASS_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+ {{- else }}
+ - name: DATA_SOURCE_PASS
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.userPasswordKey" . }}
+ {{- end }}
+ - name: DATA_SOURCE_USER
+ value: {{ default "postgres" $customUser | quote }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ containerPort: {{ .Values.metrics.containerPorts.metrics }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ mountPath: /conf
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }}
+ {{- end }}
+ volumes:
+ {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ - name: postgresql-config
+ configMap:
+ name: {{ include "postgresql.primary.configmapName" . }}
+ {{- end }}
+ {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ include "postgresql.primary.extendedConfigmapName" . }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ include "postgresql.secretName" . }}
+ {{- end }}
+ {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+ - name: custom-init-scripts
+ configMap:
+ name: {{ include "postgresql.initdb.scriptsCM" . }}
+ {{- end }}
+ {{- if .Values.primary.initdb.scriptsSecret }}
+ - name: custom-init-scripts-secret
+ secret:
+ secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ secret:
+ secretName: {{ include "postgresql.tlsSecretName" . }}
+ - name: postgresql-certificates
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.primary.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ configMap:
+ name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ emptyDir:
+ medium: Memory
+ {{- if .Values.shmVolume.sizeLimit }}
+ sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+ {{- end }}
+ {{- end }}
+ {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ tpl .Values.primary.persistence.existingClaim $ }}
+ {{- else if not .Values.primary.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- if .Values.primary.persistence.annotations }}
+ annotations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.primary.persistence.labels }}
+ labels: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.labels "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.primary.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if .Values.primary.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.primary.persistence.size | quote }}
+ {{- if .Values.primary.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/primary/svc-headless.yaml b/charts/penpot/charts/postgresql/templates/primary/svc-headless.yaml
new file mode 100644
index 0000000..684177a
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/svc-headless.yaml
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.primary.svc.headless" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: primary
+  annotations:
+    {{- if .Values.primary.service.headless.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.headless.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- /* "annotations:" is emitted unconditionally: the tolerate-unready-endpoints */}}
+    {{- /* annotation rendered just below would otherwise be orphaned under metadata. */}}
+ # Use this annotation in addition to the actual publishNotReadyAddresses
+ # field below because the annotation will stop being respected soon but the
+ # field is broken in some versions of Kubernetes:
+ # https://github.com/kubernetes/kubernetes/issues/58662
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ type: ClusterIP
+ clusterIP: None
+ # We want all pods in the StatefulSet to have their addresses published for
+ # the sake of the other Postgresql pods even before they're ready, since they
+ # have to be able to talk to each other in order to become ready.
+ publishNotReadyAddresses: true
+ ports:
+ - name: tcp-postgresql
+ port: {{ template "postgresql.service.port" . }}
+ targetPort: tcp-postgresql
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: primary
diff --git a/charts/penpot/charts/postgresql/templates/primary/svc.yaml b/charts/penpot/charts/postgresql/templates/primary/svc.yaml
new file mode 100644
index 0000000..cf18480
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/primary/svc.yaml
@@ -0,0 +1,51 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.primary.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: primary
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.service.annotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.primary.service.type }}
+ {{- if or (eq .Values.primary.service.type "LoadBalancer") (eq .Values.primary.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{ .Values.primary.service.loadBalancerSourceRanges }}
+ {{- end }}
+ {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and .Values.primary.service.clusterIP (eq .Values.primary.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.primary.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.primary.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.primary.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.primary.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ port: {{ template "postgresql.service.port" . }}
+ targetPort: tcp-postgresql
+ {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }}
+ nodePort: {{ .Values.primary.service.nodePorts.postgresql }}
+ {{- else if eq .Values.primary.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.primary.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: primary
diff --git a/charts/penpot/charts/postgresql/templates/prometheusrule.yaml b/charts/penpot/charts/postgresql/templates/prometheusrule.yaml
new file mode 100644
index 0000000..24be710
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/prometheusrule.yaml
@@ -0,0 +1,22 @@
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.metrics.prometheusRule.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ groups:
+ - name: {{ include "common.names.fullname" . }}
+ rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/psp.yaml b/charts/penpot/charts/postgresql/templates/psp.yaml
new file mode 100644
index 0000000..48d1175
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/psp.yaml
@@ -0,0 +1,41 @@
+{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- if and $pspAvailable .Values.psp.create }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ privileged: false
+ volumes:
+ - 'configMap'
+ - 'secret'
+ - 'persistentVolumeClaim'
+ - 'emptyDir'
+ - 'projected'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/extended-configmap.yaml b/charts/penpot/charts/postgresql/templates/read/extended-configmap.yaml
new file mode 100644
index 0000000..e329d13
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/extended-configmap.yaml
@@ -0,0 +1,18 @@
+{{- if (include "postgresql.readReplicas.createExtendedConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-extended-configuration" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ override.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extendedConfiguration "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/metrics-configmap.yaml b/charts/penpot/charts/postgresql/templates/read/metrics-configmap.yaml
new file mode 100644
index 0000000..b00a6ec
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/metrics-configmap.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.metrics.enabled .Values.metrics.customMetrics (eq .Values.architecture "replication") }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/metrics-svc.yaml b/charts/penpot/charts/postgresql/templates/read/metrics-svc.yaml
new file mode 100644
index 0000000..b3e5497
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/metrics-svc.yaml
@@ -0,0 +1,31 @@
+{{- if and .Values.metrics.enabled (eq .Values.architecture "replication") }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics-read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.metrics.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+ {{- if .Values.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.metrics.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ port: {{ .Values.metrics.service.ports.metrics }}
+ targetPort: http-metrics
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/networkpolicy.yaml b/charts/penpot/charts/postgresql/templates/read/networkpolicy.yaml
new file mode 100644
index 0000000..c969cd7
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/networkpolicy.yaml
@@ -0,0 +1,36 @@
+{{- if and .Values.networkPolicy.enabled (eq .Values.architecture "replication") .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-ingress" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: read
+ ingress:
+ {{- if and .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/servicemonitor.yaml b/charts/penpot/charts/postgresql/templates/read/servicemonitor.yaml
new file mode 100644
index 0000000..d511d6b
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled (eq .Values.architecture "replication") }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "postgresql.readReplica.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics-read
+ {{- if .Values.metrics.serviceMonitor.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.metrics.serviceMonitor.jobLabel }}
+ jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ {{- if .Values.metrics.serviceMonitor.selector }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+ {{- end }}
+ app.kubernetes.io/component: metrics-read
+ endpoints:
+ - port: http-metrics
+ {{- if .Values.metrics.serviceMonitor.interval }}
+ interval: {{ .Values.metrics.serviceMonitor.interval }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+ scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.relabelings }}
+ relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.honorLabels }}
+ honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+ {{- end }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/statefulset.yaml b/charts/penpot/charts/postgresql/templates/read/statefulset.yaml
new file mode 100644
index 0000000..80c8e8b
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/statefulset.yaml
@@ -0,0 +1,531 @@
+{{- if eq .Values.architecture "replication" }}
+{{- $customUser := include "postgresql.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "postgresql.readReplica.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: read
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  replicas: {{ .Values.readReplicas.replicaCount }}
+  serviceName: {{ include "postgresql.readReplica.svc.headless" . }}
+  {{- if .Values.readReplicas.updateStrategy }}
+  updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: read
+  template:
+    metadata:
+      name: {{ include "postgresql.readReplica.fullname" . }}
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: read
+        {{- if .Values.commonLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if .Values.readReplicas.podLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+      annotations:
+        {{- if (include "postgresql.readReplicas.createExtendedConfigmap" .) }}
+        checksum/extended-configuration: {{ include (print $.Template.BasePath "/read/extended-configmap.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if .Values.readReplicas.podAnnotations }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if .Values.readReplicas.extraPodSpec }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }}
+      {{- end }}
+      serviceAccountName: {{ include "postgresql.serviceAccountName" . }}
+      {{- include "postgresql.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.readReplicas.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.readReplicas.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.priorityClassName }}
+      priorityClassName: {{ .Values.readReplicas.priorityClassName }}
+      {{- end }}
+      {{- if .Values.readReplicas.schedulerName }}
+      schedulerName: {{ .Values.readReplicas.schedulerName | quote }}
+      {{- end }}
+      {{- if .Values.readReplicas.terminationGracePeriodSeconds }}
+      terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }}
+      {{- end }}
+      {{- if .Values.readReplicas.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.readReplicas.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      hostNetwork: {{ .Values.readReplicas.hostNetwork }}
+      hostIPC: {{ .Values.readReplicas.hostIPC }}
+      initContainers:
+        {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+        - name: copy-certs
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          # We don't require a privileged container in this case
+          {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+          volumeMounts:
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+        {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }}
+        - name: init-chmod-data
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              {{- if .Values.readReplicas.persistence.enabled }}
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }}
+              {{- else }}
+              chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }}
+              {{- end }}
+              mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+              chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+              find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+                xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+              {{- else }}
+                xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }}
+              {{- end }}
+              {{- end }}
+              {{- if .Values.shmVolume.enabled }}
+              chmod -R 777 /dev/shm
+              {{- end }}
+              {{- if .Values.tls.enabled }}
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+              {{- else }}
+              chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+              {{- end }}
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+              {{- end }}
+          {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+          securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+          {{- else }}
+          securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.readReplicas.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+              {{- if .Values.readReplicas.persistence.subPath }}
+              subPath: {{ .Values.readReplicas.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+            {{- end }}
+        {{- end }}
+        {{- if .Values.readReplicas.initContainers }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.initContainers "context" $ ) | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: postgresql
+          image: {{ include "postgresql.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: POSTGRESQL_PORT_NUMBER
+              value: {{ .Values.containerPorts.postgresql | quote }}
+            - name: POSTGRESQL_VOLUME_DIR
+              value: {{ .Values.readReplicas.persistence.mountPath | quote }}
+            {{- if .Values.readReplicas.persistence.mountPath }}
+            - name: PGDATA
+              value: {{ .Values.postgresqlDataDir | quote }}
+            {{- end }}
+            # Authentication
+            {{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_POSTGRES_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/postgres-password"
+            {{- else }}
+            - name: POSTGRES_POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ include "postgresql.adminPasswordKey" . }}
+            {{- end }}
+            {{- end }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_PASSWORD_FILE
+              value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+            {{- else }}
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ include "postgresql.userPasswordKey" . }}
+            {{- end }}
+            # Replication
+            - name: POSTGRES_REPLICATION_MODE
+              value: "slave"
+            - name: POSTGRES_REPLICATION_USER
+              value: {{ .Values.auth.replicationUsername | quote }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_REPLICATION_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/replication-password"
+            {{- else }}
+            - name: POSTGRES_REPLICATION_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ include "postgresql.replicationPasswordKey" . }}
+            {{- end }}
+            - name: POSTGRES_CLUSTER_APP_NAME
+              value: {{ .Values.replication.applicationName }}
+            - name: POSTGRES_MASTER_HOST
+              value: {{ include "postgresql.primary.fullname" . }}
+            - name: POSTGRES_MASTER_PORT_NUMBER
+              value: {{ include "postgresql.service.port" . | quote }}
+            # TLS
+            - name: POSTGRESQL_ENABLE_TLS
+              value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+            {{- if .Values.tls.enabled }}
+            - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+              value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+            - name: POSTGRESQL_TLS_CERT_FILE
+              value: {{ include "postgresql.tlsCert" . }}
+            - name: POSTGRESQL_TLS_KEY_FILE
+              value: {{ include "postgresql.tlsCertKey" . }}
+            {{- if .Values.tls.certCAFilename }}
+            - name: POSTGRESQL_TLS_CA_FILE
+              value: {{ include "postgresql.tlsCACert" . }}
+            {{- end }}
+            {{- if .Values.tls.crlFilename }}
+            - name: POSTGRESQL_TLS_CRL_FILE
+              value: {{ include "postgresql.tlsCRL" . }}
+            {{- end }}
+            {{- end }}
+            # Audit
+            - name: POSTGRESQL_LOG_HOSTNAME
+              value: {{ .Values.audit.logHostname | quote }}
+            - name: POSTGRESQL_LOG_CONNECTIONS
+              value: {{ .Values.audit.logConnections | quote }}
+            - name: POSTGRESQL_LOG_DISCONNECTIONS
+              value: {{ .Values.audit.logDisconnections | quote }}
+            {{- if .Values.audit.logLinePrefix }}
+            - name: POSTGRESQL_LOG_LINE_PREFIX
+              value: {{ .Values.audit.logLinePrefix | quote }}
+            {{- end }}
+            {{- if .Values.audit.logTimezone }}
+            - name: POSTGRESQL_LOG_TIMEZONE
+              value: {{ .Values.audit.logTimezone | quote }}
+            {{- end }}
+            {{- if .Values.audit.pgAuditLog }}
+            - name: POSTGRESQL_PGAUDIT_LOG
+              value: {{ .Values.audit.pgAuditLog | quote }}
+            {{- end }}
+            - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+              value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+            # Others
+            - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+              value: {{ .Values.audit.clientMinMessages | quote }}
+            - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+              value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+            {{- if .Values.readReplicas.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.readReplicas.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ .Values.readReplicas.extraEnvVarsCM }}
+            {{- end }}
+            {{- if .Values.readReplicas.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ .Values.readReplicas.extraEnvVarsSecret }}
+            {{- end }}
+          {{- end }}
+          ports:
+            - name: tcp-postgresql
+              containerPort: {{ .Values.containerPorts.postgresql }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.readReplicas.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- end }}
+          {{- if .Values.readReplicas.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- end }}
+          {{- if .Values.readReplicas.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                - -e
+                {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readReplicas.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: postgresql-password
+              mountPath: /opt/bitnami/postgresql/secrets/
+            {{- end }}
+            {{- if .Values.readReplicas.extendedConfiguration }}
+            - name: postgresql-extended-config
+              mountPath: /bitnami/postgresql/conf/conf.d/
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+              readOnly: true
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.readReplicas.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+              {{- if .Values.readReplicas.persistence.subPath }}
+              subPath: {{ .Values.readReplicas.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.readReplicas.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.metrics.enabled }}
+        - name: metrics
+          image: {{ include "postgresql.metrics.image" . }}
+          imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+          {{- if .Values.metrics.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.metrics.customMetrics }}
+          args: [ "--extend.query-path", "/conf/custom-metrics.yaml" ]
+          {{- end }}
+          env:
+            {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }}
+            - name: DATA_SOURCE_URI
+              value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.service.port" .)) $database }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: DATA_SOURCE_PASS_FILE
+              value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+            {{- else }}
+            - name: DATA_SOURCE_PASS
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ include "postgresql.userPasswordKey" . }}
+            {{- end }}
+            - name: DATA_SOURCE_USER
+              value: {{ default "postgres" $customUser | quote }}
+            {{- if .Values.metrics.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          ports:
+            - name: http-metrics
+              containerPort: {{ .Values.metrics.containerPorts.metrics }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.metrics.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+          {{- else if .Values.metrics.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+            tcpSocket:
+              port: http-metrics
+          {{- end }}
+          {{- if .Values.metrics.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.metrics.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+            httpGet:
+              path: /
+              port: http-metrics
+          {{- end }}
+          {{- if .Values.metrics.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.metrics.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+            httpGet:
+              path: /
+              port: http-metrics
+          {{- end }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: postgresql-password
+              mountPath: /opt/bitnami/postgresql/secrets/
+            {{- end }}
+            {{- if .Values.metrics.customMetrics }}
+            - name: custom-metrics
+              mountPath: /conf
+              readOnly: true
+            {{- end }}
+          {{- if .Values.metrics.resources }}
+          resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+          {{- end }}
+        {{- end }}
+        {{- if .Values.readReplicas.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if .Values.readReplicas.extendedConfiguration }}
+        - name: postgresql-extended-config
+          configMap:
+            name: {{ include "postgresql.readReplicas.extendedConfigmapName" . }}
+        {{- end }}
+        {{- if .Values.auth.usePasswordFiles }}
+        - name: postgresql-password
+          secret:
+            secretName: {{ include "postgresql.secretName" . }}
+        {{- end }}
+        {{- if .Values.tls.enabled }}
+        - name: raw-certificates
+          secret:
+            secretName: {{ include "postgresql.tlsSecretName" . }}
+        - name: postgresql-certificates
+          emptyDir: {}
+        {{- end }}
+        {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+        - name: custom-metrics
+          configMap:
+            name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }}
+        {{- end }}
+        {{- if .Values.shmVolume.enabled }}
+        - name: dshm
+          emptyDir:
+            medium: Memory
+            {{- if .Values.shmVolume.sizeLimit }}
+            sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+            {{- end }}
+        {{- end }}
+        {{- if .Values.readReplicas.extraVolumes }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if and .Values.readReplicas.persistence.enabled .Values.readReplicas.persistence.existingClaim }}
+        - name: data
+          persistentVolumeClaim:
+            claimName: {{ tpl .Values.readReplicas.persistence.existingClaim $ }}
+        {{- else if not .Values.readReplicas.persistence.enabled }}
+        - name: data
+          emptyDir: {}
+        {{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+        {{- if .Values.readReplicas.persistence.annotations }}
+        annotations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.annotations "context" $) | nindent 10 }}
+        {{- end }}
+        {{- if .Values.readReplicas.persistence.labels }}
+        labels: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.labels "context" $) | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+          {{- range .Values.readReplicas.persistence.accessModes }}
+          - {{ . | quote }}
+          {{- end }}
+        {{- if .Values.readReplicas.persistence.dataSource }}
+        dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.readReplicas.persistence.size | quote }}
+        {{- if .Values.readReplicas.persistence.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }}
+        {{- end -}}
+        {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }}
+{{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/svc-headless.yaml b/charts/penpot/charts/postgresql/templates/read/svc-headless.yaml
new file mode 100644
index 0000000..ee8f756
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/svc-headless.yaml
@@ -0,0 +1,39 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.readReplica.svc.headless" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: read
+  # "annotations" is emitted unconditionally: the tolerate-unready-endpoints
+  # annotation below must always be present on this headless Service.
+  annotations:
+    {{- if .Values.readReplicas.service.headless.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.service.headless.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    # Use this annotation in addition to the actual publishNotReadyAddresses
+    # field below because the annotation will stop being respected soon but the
+    # field is broken in some versions of Kubernetes:
+    # https://github.com/kubernetes/kubernetes/issues/58662
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  # We want all pods in the StatefulSet to have their addresses published for
+  # the sake of the other Postgresql pods even before they're ready, since they
+  # have to be able to talk to each other in order to become ready.
+  publishNotReadyAddresses: true
+  ports:
+    - name: tcp-postgresql
+      port: {{ include "postgresql.readReplica.service.port" . }}
+      targetPort: tcp-postgresql
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/read/svc.yaml b/charts/penpot/charts/postgresql/templates/read/svc.yaml
new file mode 100644
index 0000000..3eece4d
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/read/svc.yaml
@@ -0,0 +1,53 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.readReplica.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: read
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.service.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+spec:
+  type: {{ .Values.readReplicas.service.type }}
+  {{- if or (eq .Values.readReplicas.service.type "LoadBalancer") (eq .Values.readReplicas.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerSourceRanges)) }}
+  loadBalancerSourceRanges: {{- toYaml .Values.readReplicas.service.loadBalancerSourceRanges | nindent 4 }}
+  {{- end }}
+  {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }}
+  {{- end }}
+  {{- if and .Values.readReplicas.service.clusterIP (eq .Values.readReplicas.service.type "ClusterIP") }}
+  clusterIP: {{ .Values.readReplicas.service.clusterIP }}
+  {{- end }}
+  {{- if .Values.readReplicas.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.readReplicas.service.sessionAffinity }}
+  {{- end }}
+  {{- if .Values.readReplicas.service.sessionAffinityConfig }}
+  sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.sessionAffinityConfig "context" $) | nindent 4 }}
+  {{- end }}
+  ports:
+    - name: tcp-postgresql
+      port: {{ include "postgresql.readReplica.service.port" . }}
+      targetPort: tcp-postgresql
+      {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }}
+      nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }}
+      {{- else if eq .Values.readReplicas.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if .Values.readReplicas.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/role.yaml b/charts/penpot/charts/postgresql/templates/role.yaml
new file mode 100644
index 0000000..00f9222
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/role.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.rbac.create }}
+kind: Role
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+# yamllint disable rule:indentation
+rules:
+  {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+  {{- if and $pspAvailable .Values.psp.create }}
+  - apiGroups:
+      - 'policy'
+    resources:
+      - 'podsecuritypolicies'
+    verbs:
+      - 'use'
+    resourceNames:
+      - {{ include "common.names.fullname" . }}
+  {{- end }}
+  {{- if .Values.rbac.rules }}
+  {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }}
+  {{- end }}
+# yamllint enable rule:indentation
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/rolebinding.yaml b/charts/penpot/charts/postgresql/templates/rolebinding.yaml
new file mode 100644
index 0000000..0311c0e
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/rolebinding.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+kind: RoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ kind: Role
+ name: {{ include "common.names.fullname" . }}
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "postgresql.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/secrets.yaml b/charts/penpot/charts/postgresql/templates/secrets.yaml
new file mode 100644
index 0000000..5f28fb3
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/secrets.yaml
@@ -0,0 +1,29 @@
+{{- if (include "postgresql.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: Opaque
+data:
+ {{- if .Values.auth.enablePostgresUser }}
+ postgres-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "postgres-password" "providedValues" (list "global.postgresql.auth.postgresPassword" "auth.postgresPassword") "context" $) }}
+ {{- end }}
+ {{- if not (empty (include "postgresql.username" .)) }}
+ password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "password" "providedValues" (list "global.postgresql.auth.password" "auth.password") "context" $) }}
+ {{- end }}
+ {{- if eq .Values.architecture "replication" }}
+ replication-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "replication-password" "providedValues" (list "auth.replicationPassword") "context" $) }}
+ {{- end }}
+ # We don't auto-generate LDAP password when it's not provided as we do for other passwords
+ {{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }}
+ ldap-password: {{ coalesce .Values.ldap.bind_password .Values.ldap.bindpw | b64enc | quote }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/penpot/charts/postgresql/templates/serviceaccount.yaml b/charts/penpot/charts/postgresql/templates/serviceaccount.yaml
new file mode 100644
index 0000000..179f8f2
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/serviceaccount.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "postgresql.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.serviceAccount.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/templates/tls-secrets.yaml b/charts/penpot/charts/postgresql/templates/tls-secrets.yaml
new file mode 100644
index 0000000..59c5776
--- /dev/null
+++ b/charts/penpot/charts/postgresql/templates/tls-secrets.yaml
@@ -0,0 +1,27 @@
+{{- if (include "postgresql.createTlsSecret" . ) }}
+{{- $ca := genCA "postgresql-ca" 365 }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $primaryHeadlessServiceName := include "postgresql.primary.svc.headless" . }}
+{{- $readHeadlessServiceName := include "postgresql.readReplica.svc.headless" . }}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }}
+{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ printf "%s-crt" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ ca.crt: {{ $ca.Cert | b64enc | quote }}
+ tls.crt: {{ $crt.Cert | b64enc | quote }}
+ tls.key: {{ $crt.Key | b64enc | quote }}
+{{- end }}
diff --git a/charts/penpot/charts/postgresql/values.schema.json b/charts/penpot/charts/postgresql/values.schema.json
new file mode 100644
index 0000000..fc41483
--- /dev/null
+++ b/charts/penpot/charts/postgresql/values.schema.json
@@ -0,0 +1,156 @@
+{
+ "$schema": "http://json-schema.org/schema#",
+ "type": "object",
+ "properties": {
+ "architecture": {
+ "type": "string",
+ "title": "PostgreSQL architecture",
+ "form": true,
+ "description": "Allowed values: `standalone` or `replication`"
+ },
+ "auth": {
+ "type": "object",
+ "title": "Authentication configuration",
+ "form": true,
+ "properties": {
+ "enablePostgresUser": {
+ "type": "boolean",
+ "title": "Enable \"postgres\" admin user",
+ "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user",
+ "form": true
+ },
+ "postgresPassword": {
+ "type": "string",
+ "title": "Password for the \"postgres\" admin user",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true
+ },
+ "database": {
+ "type": "string",
+ "title": "PostgreSQL custom database",
+ "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL",
+ "form": true
+ },
+ "username": {
+ "type": "string",
+ "title": "PostgreSQL custom user",
+ "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. This user only has permissions on the PostgreSQL custom database",
+ "form": true
+ },
+ "password": {
+ "type": "string",
+ "title": "Password for the custom user to create",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true
+ },
+ "replicationUsername": {
+ "type": "string",
+ "title": "PostgreSQL replication user",
+ "description": "Name of user used to manage replication.",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ },
+ "replicationPassword": {
+ "type": "string",
+ "title": "Password for PostgreSQL replication user",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ }
+ }
+ },
+ "persistence": {
+ "type": "object",
+ "properties": {
+ "size": {
+ "type": "string",
+ "title": "Persistent Volume Size",
+ "form": true,
+ "render": "slider",
+ "sliderMin": 1,
+ "sliderMax": 100,
+ "sliderUnit": "Gi"
+ }
+ }
+ },
+ "resources": {
+ "type": "object",
+ "title": "Required Resources",
+ "description": "Configure resource requests",
+ "form": true,
+ "properties": {
+ "requests": {
+ "type": "object",
+ "properties": {
+ "memory": {
+ "type": "string",
+ "form": true,
+ "render": "slider",
+ "title": "Memory Request",
+ "sliderMin": 10,
+ "sliderMax": 2048,
+ "sliderUnit": "Mi"
+ },
+ "cpu": {
+ "type": "string",
+ "form": true,
+ "render": "slider",
+ "title": "CPU Request",
+ "sliderMin": 10,
+ "sliderMax": 2000,
+ "sliderUnit": "m"
+ }
+ }
+ }
+ }
+ },
+ "replication": {
+ "type": "object",
+ "form": true,
+ "title": "Replication Details",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Enable Replication",
+ "form": true
+ },
+ "readReplicas": {
+ "type": "integer",
+ "title": "Read Replicas",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ }
+ }
+ },
+ "volumePermissions": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "form": true,
+ "title": "Enable Init Containers",
+ "description": "Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup"
+ }
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Configure metrics exporter",
+ "form": true
+ }
+ }
+ }
+ }
+}
diff --git a/charts/penpot/charts/postgresql/values.yaml b/charts/penpot/charts/postgresql/values.yaml
new file mode 100644
index 0000000..b4785a4
--- /dev/null
+++ b/charts/penpot/charts/postgresql/values.yaml
@@ -0,0 +1,1411 @@
+## @section Global parameters
+## Please, note that this will override the parameters, including dependencies, configured to use the global value
+##
+global:
+ ## @param global.imageRegistry Global Docker image registry
+ ##
+ imageRegistry: ""
+ ## @param global.imagePullSecrets Global Docker registry secret names as an array
+ ## e.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+ ## @param global.storageClass Global StorageClass for Persistent Volume(s)
+ ##
+ storageClass: ""
+ postgresql:
+ ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`)
+ ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
+ ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`)
+ ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
+ ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`).
+ ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ##
+ auth:
+ postgresPassword: ""
+ username: ""
+ password: ""
+ database: ""
+ existingSecret: ""
+ secretKeys:
+ adminPasswordKey: ""
+ userPasswordKey: ""
+ replicationPasswordKey: ""
+ ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
+ ##
+ service:
+ ports:
+ postgresql: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+ ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+ ##
+ enabled: false
+ ## @param diagnosticMode.command Command to override all containers in the statefulset
+ ##
+ command:
+ - sleep
+ ## @param diagnosticMode.args Args to override all containers in the statefulset
+ ##
+ args:
+ - infinity
+
+## @section PostgreSQL common parameters
+##
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+## @param image.registry PostgreSQL image registry
+## @param image.repository PostgreSQL image repository
+## @param image.tag PostgreSQL image tag (immutable tags are recommended)
+## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy PostgreSQL image pull policy
+## @param image.pullSecrets Specify image pull secrets
+## @param image.debug Specify if debug values should be set
+##
+image:
+ registry: docker.io
+ repository: bitnami/postgresql
+ tag: 15.1.0-debian-11-r20
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Set to true if you would like to see extra information on logs
+ ##
+ debug: false
+## Authentication parameters
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run
+##
+auth:
+ ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
+ ##
+ enablePostgresUser: true
+ ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided
+ ##
+ postgresPassword: ""
+ ## @param auth.username Name for a custom user to create
+ ##
+ username: ""
+ ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided
+ ##
+ password: ""
+ ## @param auth.database Name for a custom database to create
+ ##
+ database: ""
+ ## @param auth.replicationUsername Name of the replication user
+ ##
+ replicationUsername: repl_user
+ ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided
+ ##
+ replicationPassword: ""
+ ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case.
+ ##
+ existingSecret: ""
+ ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ##
+ secretKeys:
+ adminPasswordKey: postgres-password
+ userPasswordKey: password
+ replicationPasswordKey: replication-password
+ ## @param auth.usePasswordFiles Mount credentials as files instead of using environment variables
+ ##
+ usePasswordFiles: false
+## @param architecture PostgreSQL architecture (`standalone` or `replication`)
+##
+architecture: standalone
+## Replication configuration
+## Ignored if `architecture` is `standalone`
+##
+replication:
+ ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off`
+ ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`.
+ ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+ ##
+ synchronousCommit: "off"
+ numSynchronousReplicas: 0
+ ## @param replication.applicationName Cluster application name. Useful for advanced replication settings
+ ##
+ applicationName: my_application
+## @param containerPorts.postgresql PostgreSQL container port
+##
+containerPorts:
+ postgresql: 5432
+## Audit settings
+## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing
+## @param audit.logHostname Log client hostnames
+## @param audit.logConnections Add client log-in operations to the log file
+## @param audit.logDisconnections Add client log-outs operations to the log file
+## @param audit.pgAuditLog Add operations to log using the pgAudit extension
+## @param audit.pgAuditLogCatalog Log catalog using pgAudit
+## @param audit.clientMinMessages Message log level to share with the user
+## @param audit.logLinePrefix Template for log line prefix (default if not set)
+## @param audit.logTimezone Timezone for the log timestamps
+##
+audit:
+ logHostname: false
+ logConnections: false
+ logDisconnections: false
+ pgAuditLog: ""
+ pgAuditLogCatalog: "off"
+ clientMinMessages: error
+ logLinePrefix: ""
+ logTimezone: ""
+## LDAP configuration
+## @param ldap.enabled Enable LDAP support
+## DEPRECATED ldap.url It will be removed in a future release, please use 'ldap.uri' instead
+## @param ldap.server IP address or name of the LDAP server.
+## @param ldap.port Port number on the LDAP server to connect to
+## @param ldap.prefix String to prepend to the user name when forming the DN to bind
+## @param ldap.suffix String to append to the user name when forming the DN to bind
+## DEPRECATED ldap.baseDN It will be removed in a future release, please use 'ldap.basedn' instead
+## DEPRECATED ldap.bindDN It will be removed in a future release, please use 'ldap.binddn' instead
+## DEPRECATED ldap.bind_password It will be removed in a future release, please use 'ldap.bindpw' instead
+## @param ldap.basedn Root DN to begin the search for the user in
+## @param ldap.binddn DN of user to bind to LDAP
+## @param ldap.bindpw Password for the user to bind to LDAP
+## DEPRECATED ldap.search_attr It will be removed in a future release, please use 'ldap.searchAttribute' instead
+## DEPRECATED ldap.search_filter It will be removed in a future release, please use 'ldap.searchFilter' instead
+## @param ldap.searchAttribute Attribute to match against the user name in the search
+## @param ldap.searchFilter The search filter to use when doing search+bind authentication
+## @param ldap.scheme Set to `ldaps` to use LDAPS
+## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead
+## @param ldap.tls.enabled Set to true to enable TLS encryption
+##
+ldap:
+ enabled: false
+ server: ""
+ port: ""
+ prefix: ""
+ suffix: ""
+ basedn: ""
+ binddn: ""
+ bindpw: ""
+ searchAttribute: ""
+ searchFilter: ""
+ scheme: ""
+ tls:
+ enabled: false
+ ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored.
+ ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html
+ uri: ""
+## @param postgresqlDataDir PostgreSQL data dir folder
+##
+postgresqlDataDir: /bitnami/postgresql/data
+## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list)
+##
+postgresqlSharedPreloadLibraries: "pgaudit"
+## Start PostgreSQL pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M`
+## ref: https://github.com/docker-library/postgres/issues/416
+## ref: https://github.com/containerd/containerd/issues/3654
+##
+shmVolume:
+ ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s)
+ ##
+ enabled: true
+ ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs
+ ## Note: the size of the tmpfs counts against container's memory limit
+ ## e.g:
+ ## sizeLimit: 1Gi
+ ##
+ sizeLimit: ""
+## TLS configuration
+##
+tls:
+ ## @param tls.enabled Enable TLS traffic support
+ ##
+ enabled: false
+ ## @param tls.autoGenerated Generate automatically self-signed TLS certificates
+ ##
+ autoGenerated: false
+ ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's
+ ##
+ preferServerCiphers: true
+ ## @param tls.certificatesSecret Name of an existing secret that contains the certificates
+ ##
+ certificatesSecret: ""
+ ## @param tls.certFilename Certificate filename
+ ##
+ certFilename: ""
+ ## @param tls.certKeyFilename Certificate key filename
+ ##
+ certKeyFilename: ""
+ ## @param tls.certCAFilename CA Certificate filename
+ ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate
+ ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html
+ ##
+ certCAFilename: ""
+ ## @param tls.crlFilename File containing a Certificate Revocation List
+ ##
+ crlFilename: ""
+
+## @section PostgreSQL Primary parameters
+##
+primary:
+ ## @param primary.name Name of the primary database (eg primary, master, leader, ...)
+ ##
+ name: primary
+ ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap
+ ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+ ##
+ configuration: ""
+ ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration
+ ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
+ ## e.g:
+ ## pgHbaConfiguration: |-
+ ## local all all trust
+ ## host all all localhost trust
+ ## host mydatabase mysuser 192.168.0.0/24 md5
+ ##
+ pgHbaConfiguration: ""
+ ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration
+ ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored
+ ##
+ existingConfigmap: ""
+ ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+ ##
+ extendedConfiguration: ""
+ ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration
+ ## NOTE: `primary.extendedConfiguration` will be ignored
+ ##
+ existingExtendedConfigmap: ""
+ ## Initdb configuration
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments
+ ##
+ initdb:
+ ## @param primary.initdb.args PostgreSQL initdb extra arguments
+ ##
+ args: ""
+ ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log
+ ##
+ postgresqlWalDir: ""
+ ## @param primary.initdb.scripts Dictionary of initdb scripts
+ ## Specify dictionary of scripts to be run at first boot
+ ## e.g:
+ ## scripts:
+ ## my_init_script.sh: |
+ ## #!/bin/sh
+ ## echo "Do something."
+ ##
+ scripts: {}
+ ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot
+ ## NOTE: This will override `primary.initdb.scripts`
+ ##
+ scriptsConfigMap: ""
+ ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information)
+ ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap`
+ ##
+ scriptsSecret: ""
+ ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts
+ ##
+ user: ""
+ ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts
+ ##
+ password: ""
+ ## Configure current cluster's primary server to be the standby server in other cluster.
+ ## This will allow cross cluster replication and provide cross cluster high availability.
+ ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled.
+ ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not
+ ## @param primary.standby.primaryHost The Host of replication primary in the other cluster
+ ## @param primary.standby.primaryPort The Port of replication primary in the other cluster
+ ##
+ standby:
+ enabled: false
+ primaryHost: ""
+ primaryPort: ""
+ ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param primary.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param primary.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers
+ ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers
+ ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers
+ ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param primary.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param primary.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## PostgreSQL Primary resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers
+ ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers
+ ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers
+ ##
+ resources:
+ limits: {}
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ ## Pod Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param primary.podSecurityContext.enabled Enable security context
+ ## @param primary.podSecurityContext.fsGroup Group ID for the pod
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param primary.containerSecurityContext.enabled Enable container security context
+ ## @param primary.containerSecurityContext.runAsUser User ID for the container
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param primary.hostAliases PostgreSQL primary pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary)
+ ##
+ hostNetwork: false
+ ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary)
+ ##
+ hostIPC: false
+ ## @param primary.labels Map of labels to add to the statefulset (postgresql primary)
+ ##
+ labels: {}
+ ## @param primary.annotations Annotations for PostgreSQL primary pods
+ ##
+ annotations: {}
+ ## @param primary.podLabels Map of labels to add to the pods (postgresql primary)
+ ##
+ podLabels: {}
+ ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary)
+ ##
+ podAnnotations: {}
+ ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## PostgreSQL Primary node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param primary.affinity Affinity for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary)
+ ##
+ priorityClassName: ""
+ ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type
+ ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate: {}
+ ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s)
+ ##
+ extraVolumes: []
+ ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s)
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s)
+ ## Example
+ ##
+ ## initContainers:
+ ## - name: do-something
+ ## image: busybox
+ ## command: ['do', 'something']
+ ##
+ initContainers: []
+ ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s)
+ ##
+ extraPodSpec: {}
+ ## PostgreSQL Primary service configuration
+ ##
+ service:
+ ## @param primary.service.type Kubernetes Service type
+ ##
+ type: ClusterIP
+ ## @param primary.service.ports.postgresql PostgreSQL service port
+ ##
+ ports:
+ postgresql: 5432
+ ## Node ports to expose
+ ## NOTE: choose port between <30000-32767>
+ ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ nodePorts:
+ postgresql: ""
+ ## @param primary.service.clusterIP Static clusterIP or None for headless services
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param primary.service.annotations Annotations for PostgreSQL primary service
+ ##
+ annotations: {}
+ ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param primary.service.externalTrafficPolicy Enable client source IP preservation
+ ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service
+ ##
+ extraPorts: []
+ ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## Headless service properties
+ ##
+ headless:
+ ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service
+ ##
+ annotations: {}
+ ## PostgreSQL Primary persistence configuration
+ ##
+ persistence:
+ ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC
+ ##
+ enabled: true
+ ## @param primary.persistence.existingClaim Name of an existing PVC to use
+ ##
+ existingClaim: ""
+ ## @param primary.persistence.mountPath The path the volume will be mounted at
+ ## Note: useful when using custom PostgreSQL images
+ ##
+ mountPath: /bitnami/postgresql
+ ## @param primary.persistence.subPath The subdirectory of the volume to mount to
+ ## Useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
+ ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume
+ ##
+ size: 8Gi
+ ## @param primary.persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param primary.persistence.labels Labels for the PVC
+ ##
+ labels: {}
+ ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param primary.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+
+## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
+##
+readReplicas:
+ ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...)
+ ##
+ name: read
+ ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas
+ ##
+ replicaCount: 1
+ ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration)
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+ ##
+ extendedConfiguration: ""
+ ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param readReplicas.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param readReplicas.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers
+ ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers
+ ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers
+ ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## PostgreSQL read only resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers
+ ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers
+ ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers
+ ##
+ resources:
+ limits: {}
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ ## Pod Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param readReplicas.podSecurityContext.enabled Enable security context
+ ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param readReplicas.containerSecurityContext.enabled Enable container security context
+ ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only)
+ ##
+ hostNetwork: false
+ ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only)
+ ##
+ hostIPC: false
+ ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only)
+ ##
+ labels: {}
+ ## @param readReplicas.annotations Annotations for PostgreSQL read only pods
+ ##
+ annotations: {}
+ ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only)
+ ##
+ podLabels: {}
+ ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only)
+ ##
+ podAnnotations: {}
+ ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## PostgreSQL read only node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only)
+ ##
+ priorityClassName: ""
+ ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type
+ ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate: {}
+ ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s)
+ ##
+ extraVolumes: []
+ ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s)
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s)
+ ## Example
+ ##
+ ## initContainers:
+ ## - name: do-something
+ ## image: busybox
+ ## command: ['do', 'something']
+ ##
+ initContainers: []
+ ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s)
+ ##
+ extraPodSpec: {}
+ ## PostgreSQL read only service configuration
+ ##
+ service:
+ ## @param readReplicas.service.type Kubernetes Service type
+ ##
+ type: ClusterIP
+ ## @param readReplicas.service.ports.postgresql PostgreSQL service port
+ ##
+ ports:
+ postgresql: 5432
+ ## Node ports to expose
+ ## NOTE: choose port between <30000-32767>
+ ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ nodePorts:
+ postgresql: ""
+ ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service
+ ##
+ annotations: {}
+ ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation
+ ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service
+ ##
+ extraPorts: []
+ ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## Headless service properties
+ ##
+ headless:
+ ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service
+ ##
+ annotations: {}
+ ## PostgreSQL read only persistence configuration
+ ##
+ persistence:
+ ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC
+ ##
+ enabled: true
+ ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use
+ ##
+ existingClaim: ""
+ ## @param readReplicas.persistence.mountPath The path the volume will be mounted at
+ ## Note: useful when using custom PostgreSQL images
+ ##
+ mountPath: /bitnami/postgresql
+ ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to
+ ## Useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
+ ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume
+ ##
+ size: 8Gi
+ ## @param readReplicas.persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param readReplicas.persistence.labels Labels for the PVC
+ ##
+ labels: {}
+ ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param readReplicas.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+
+## @section NetworkPolicy parameters
+
+## Add networkpolicies
+##
+networkPolicy:
+ ## @param networkPolicy.enabled Enable network policies
+ ##
+ enabled: false
+ ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus)
+ ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace.
+ ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods.
+ ##
+ metrics:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: monitoring
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: monitoring
+ ##
+ podSelector: {}
+ ## Ingress Rules
+ ##
+ ingressRules:
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin.
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s).
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s).
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node.
+ ##
+ primaryAccessOnlyFrom:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: ingress
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: access
+ ##
+ podSelector: {}
+ ## custom ingress rules
+ ## e.g:
+ ## customRules:
+ ## - from:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ customRules: {}
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin.
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s).
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s).
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes.
+ ##
+ readReplicasAccessOnlyFrom:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: ingress
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: access
+ ##
+ podSelector: {}
+ ## custom ingress rules
+ ## e.g:
+ ## customRules:
+ ## - from:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ customRules: {}
+ ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53).
+ ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
+ ##
+ egressRules:
+ # Deny connections to external. This is not compatible with an external database.
+ denyConnectionsToExternal: false
+ ## Additional custom egress rules
+ ## e.g:
+ ## customRules:
+ ## - to:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ customRules: {}
+
+## @section Volume Permissions parameters
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+ ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+ ##
+ enabled: false
+ ## @param volumePermissions.image.registry Init container volume-permissions image registry
+ ## @param volumePermissions.image.repository Init container volume-permissions image repository
+ ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+ ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+ ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: 11-debian-11-r69
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Init container resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+ ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Init container' Security Context
+ ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+ ## and not the below volumePermissions.containerSecurityContext.runAsUser
+ ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+ ##
+ containerSecurityContext:
+ runAsUser: 0
+
+## @section Other Parameters
+
+## Service account for PostgreSQL to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+ ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod
+ ##
+ create: false
+ ## @param serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+ ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+ ##
+ automountServiceAccountToken: true
+ ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+ ##
+ annotations: {}
+## Creates role for ServiceAccount
+## @param rbac.create Create Role and RoleBinding (required for PSP to work)
+##
+rbac:
+ create: false
+ ## @param rbac.rules Custom RBAC rules to set
+ ## e.g:
+ ## rules:
+ ## - apiGroups:
+ ## - ""
+ ## resources:
+ ## - pods
+ ## verbs:
+ ## - get
+ ## - list
+ ##
+ rules: []
+## Pod Security Policy
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
+##
+psp:
+ create: false
+
+## @section Metrics Parameters
+
+metrics:
+ ## @param metrics.enabled Start a prometheus exporter
+ ##
+ enabled: false
+ ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry
+ ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository
+ ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended)
+ ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy
+ ## @param metrics.image.pullSecrets Specify image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/postgres-exporter
+ tag: 0.11.1-debian-11-r46
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## @param metrics.customMetrics Define additional custom metrics
+ ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
+ ## customMetrics:
+ ## pg_database:
+ ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
+ ## metrics:
+ ## - name:
+ ## usage: "LABEL"
+ ## description: "Name of the database"
+ ## - size_bytes:
+ ## usage: "GAUGE"
+ ## description: "Size of the database in bytes"
+ ##
+ customMetrics: {}
+ ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter
+ ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables
+ ## For example:
+ ## extraEnvVars:
+ ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS
+ ## value: "true"
+ ##
+ extraEnvVars: []
+ ## PostgreSQL Prometheus exporter containers' Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context
+ ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser
+ ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsNonRoot: true
+ ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port
+ ##
+ containerPorts:
+ metrics: 9187
+ ## PostgreSQL Prometheus exporter resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container
+ ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Service configuration
+ ##
+ service:
+ ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port
+ ##
+ ports:
+ metrics: 9187
+ ## @param metrics.service.clusterIP Static clusterIP or None for headless services
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
+ ##
+ clusterIP: ""
+ ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/user-guide/services/
+ ##
+ sessionAffinity: None
+ ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
+ ##
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}"
+ ## Prometheus Operator ServiceMonitor configuration
+ ##
+ serviceMonitor:
+ ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+ ##
+ enabled: false
+ ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+ ##
+ namespace: ""
+ ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ interval: ""
+ ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ scrapeTimeout: ""
+ ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+ ##
+ labels: {}
+ ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+ ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ selector: {}
+ ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+ ##
+ relabelings: []
+ ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+ ##
+ metricRelabelings: []
+ ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
+ ##
+ jobLabel: ""
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ ##
+ prometheusRule:
+ ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+ ##
+ enabled: false
+ ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+ ##
+ namespace: ""
+ ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+ ##
+ labels: {}
+ ## @param metrics.prometheusRule.rules PrometheusRule definitions
+ ## Make sure to constrain the rules to the current postgresql service.
+ ## rules:
+ ## - alert: HugeReplicationLag
+ ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1
+ ## for: 1m
+ ## labels:
+ ## severity: critical
+ ## annotations:
+ ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+ ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+ ##
+ rules: []
diff --git a/charts/penpot/charts/redis/.helmignore b/charts/penpot/charts/redis/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/penpot/charts/redis/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/penpot/charts/redis/Chart.lock b/charts/penpot/charts/redis/Chart.lock
new file mode 100644
index 0000000..ba89ecc
--- /dev/null
+++ b/charts/penpot/charts/redis/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+ repository: https://charts.bitnami.com/bitnami
+ version: 2.2.2
+digest: sha256:49ca75cf23ba5eb7df4becef52580f98c8bd8194eb80368b9d7b875f6eefa8e5
+generated: "2022-12-12T19:34:26.826289322Z"
diff --git a/charts/penpot/charts/redis/Chart.yaml b/charts/penpot/charts/redis/Chart.yaml
new file mode 100644
index 0000000..ea15ea2
--- /dev/null
+++ b/charts/penpot/charts/redis/Chart.yaml
@@ -0,0 +1,27 @@
+annotations:
+ category: Database
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 7.0.8
+dependencies:
+- name: common
+ repository: https://charts.bitnami.com/bitnami
+ tags:
+ - bitnami-common
+ version: 2.x.x
+description: Redis(R) is an open source, advanced key-value store. It is often referred
+ to as a data structure server since keys can contain strings, hashes, lists, sets
+ and sorted sets.
+home: https://github.com/bitnami/charts/tree/main/bitnami/redis
+icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png
+keywords:
+- redis
+- keyvalue
+- database
+maintainers:
+- name: Bitnami
+ url: https://github.com/bitnami/charts
+name: redis
+sources:
+- https://github.com/bitnami/containers/tree/main/bitnami/redis
+version: 17.6.0
diff --git a/charts/penpot/charts/redis/README.md b/charts/penpot/charts/redis/README.md
new file mode 100644
index 0000000..20373c9
--- /dev/null
+++ b/charts/penpot/charts/redis/README.md
@@ -0,0 +1,949 @@
+<!--- app-name: Redis® -->
+
+# Bitnami package for Redis(R)
+
+Redis(R) is an open source, advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets.
+
+[Overview of Redis®](http://redis.io)
+
+Disclaimer: Redis is a registered trademark of Redis Ltd. Any rights therein are reserved to Redis Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Ltd. and Bitnami.
+
+## TL;DR
+
+```console
+$ helm repo add my-repo https://charts.bitnami.com/bitnami
+$ helm install my-release my-repo/redis
+```
+
+## Introduction
+
+This chart bootstraps a [Redis®](https://github.com/bitnami/containers/tree/main/bitnami/redis) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+### Choose between Redis® Helm Chart and Redis® Cluster Helm Chart
+
+You can choose any of the two Redis® Helm charts for deploying a Redis® cluster.
+
+1. [Redis® Helm Chart](https://github.com/bitnami/charts/tree/main/bitnami/redis) will deploy a master-replica cluster, with the [option](https://github.com/bitnami/charts/tree/main/bitnami/redis#redis-sentinel-configuration-parameters) of enabling using Redis® Sentinel.
+2. [Redis® Cluster Helm Chart](https://github.com/bitnami/charts/tree/main/bitnami/redis-cluster) will deploy a Redis® Cluster topology with sharding.
+
+The main features of each chart are the following:
+
+| Redis® | Redis® Cluster |
+|--------------------------------------------------------|------------------------------------------------------------------------|
+| Supports multiple databases | Supports only one database. Better if you have a big dataset |
+| Single write point (single master) | Multiple write points (multiple masters) |
+|  |  |
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add my-repo https://charts.bitnami.com/bitnami
+$ helm install my-release my-repo/redis
+```
+
+The command deploys Redis® on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| ------------------------- | ------------------------------------------------------ | ----- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+| `global.redis.password` | Global Redis® password (overrides `auth.password`) | `""` |
+
+
+### Common parameters
+
+| Name | Description | Value |
+| ------------------------ | --------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.fullname | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname | `""` |
+| `commonLabels` | Labels to add to all deployed objects | `{}` |
+| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
+| `secretAnnotations` | Annotations to add to secret | `{}` |
+| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` |
+| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
+
+
+### Redis® Image parameters
+
+| Name | Description | Value |
+| ------------------- | ---------------------------------------------------------------------------------------------------------- | -------------------- |
+| `image.registry` | Redis® image registry | `docker.io` |
+| `image.repository` | Redis® image repository | `bitnami/redis` |
+| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.8-debian-11-r0` |
+| `image.digest` | Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `image.pullPolicy` | Redis® image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Redis® image pull secrets | `[]` |
+| `image.debug` | Enable image debug mode | `false` |
+
+
+### Redis® common configuration parameters
+
+| Name | Description | Value |
+| -------------------------------- | ------------------------------------------------------------------------------------- | ------------- |
+| `architecture` | Redis® architecture. Allowed values: `standalone` or `replication` | `replication` |
+| `auth.enabled` | Enable password authentication | `true` |
+| `auth.sentinel` | Enable password authentication on sentinels too | `true` |
+| `auth.password` | Redis® password | `""` |
+| `auth.existingSecret` | The name of an existing secret with Redis® credentials | `""` |
+| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `""` |
+| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` |
+| `commonConfiguration` | Common configuration to be added into the ConfigMap | `""` |
+| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Redis® nodes | `""` |
+
+
+### Redis® master configuration parameters
+
+| Name | Description | Value |
+| ---------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------ |
+| `master.count` | Number of Redis® master instances to deploy (experimental, requires additional configuration) | `1` |
+| `master.configuration` | Configuration for Redis® master nodes | `""` |
+| `master.disableCommands` | Array with Redis® commands to disable on master nodes | `["FLUSHDB","FLUSHALL"]` |
+| `master.command` | Override default container command (useful when using custom images) | `[]` |
+| `master.args` | Override default container args (useful when using custom images) | `[]` |
+| `master.preExecCmds` | Additional commands to run prior to starting Redis® master | `[]` |
+| `master.extraFlags` | Array with additional command line flags for Redis® master | `[]` |
+| `master.extraEnvVars` | Array with extra environment variables to add to Redis® master nodes | `[]` |
+| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® master nodes | `""` |
+| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® master nodes | `""` |
+| `master.containerPorts.redis` | Container port to open on Redis® master nodes | `6379` |
+| `master.startupProbe.enabled` | Enable startupProbe on Redis® master nodes | `false` |
+| `master.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `20` |
+| `master.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
+| `master.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
+| `master.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` |
+| `master.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `master.livenessProbe.enabled` | Enable livenessProbe on Redis® master nodes | `true` |
+| `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` |
+| `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
+| `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
+| `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `master.readinessProbe.enabled` | Enable readinessProbe on Redis® master nodes | `true` |
+| `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` |
+| `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
+| `master.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
+| `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
+| `master.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `master.resources.limits` | The resources limits for the Redis® master containers | `{}` |
+| `master.resources.requests` | The requested resources for the Redis® master containers | `{}` |
+| `master.podSecurityContext.enabled` | Enabled Redis® master pods' Security Context | `true` |
+| `master.podSecurityContext.fsGroup` | Set Redis® master pod's Security Context fsGroup | `1001` |
+| `master.containerSecurityContext.enabled` | Enabled Redis® master containers' Security Context | `true` |
+| `master.containerSecurityContext.runAsUser` | Set Redis® master containers' Security Context runAsUser | `1001` |
+| `master.kind` | Use either Deployment or StatefulSet (default) | `StatefulSet` |
+| `master.schedulerName` | Alternate scheduler for Redis® master pods | `""` |
+| `master.updateStrategy.type` | Redis® master statefulset strategy type | `RollingUpdate` |
+| `master.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
+| `master.priorityClassName` | Redis® master pods' priorityClassName | `""` |
+| `master.hostAliases` | Redis® master pods host aliases | `[]` |
+| `master.podLabels` | Extra labels for Redis® master pods | `{}` |
+| `master.podAnnotations` | Annotations for Redis® master pods | `{}` |
+| `master.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis® master pods | `false` |
+| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` |
+| `master.nodeAffinityPreset.values` | Node label values to match. Ignored if `master.affinity` is set | `[]` |
+| `master.affinity` | Affinity for Redis® master pods assignment | `{}` |
+| `master.nodeSelector` | Node labels for Redis® master pods assignment | `{}` |
+| `master.tolerations` | Tolerations for Redis® master pods assignment | `[]` |
+| `master.topologySpreadConstraints` | Spread Constraints for Redis® master pod assignment | `[]` |
+| `master.dnsPolicy` | DNS Policy for Redis® master pod | `""` |
+| `master.dnsConfig` | DNS Configuration for Redis® master pod | `{}` |
+| `master.lifecycleHooks` | for the Redis® master container(s) to automate configuration before or after startup | `{}` |
+| `master.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® master pod(s) | `[]` |
+| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® master container(s) | `[]` |
+| `master.sidecars` | Add additional sidecar containers to the Redis® master pod(s) | `[]` |
+| `master.initContainers` | Add additional init containers to the Redis® master pod(s) | `[]` |
+| `master.persistence.enabled` | Enable persistence on Redis® master nodes using Persistent Volume Claims | `true` |
+| `master.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` |
+| `master.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` |
+| `master.persistence.path` | The path the volume will be mounted at on Redis® master containers | `/data` |
+| `master.persistence.subPath` | The subdirectory of the volume to mount on Redis® master containers | `""` |
+| `master.persistence.subPathExpr` | Used to construct the subPath subdirectory of the volume to mount on Redis® master containers | `""` |
+| `master.persistence.storageClass` | Persistent Volume storage class | `""` |
+| `master.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` |
+| `master.persistence.size` | Persistent Volume size | `8Gi` |
+| `master.persistence.annotations` | Additional custom annotations for the PVC | `{}` |
+| `master.persistence.selector` | Additional labels to match for the PVC | `{}` |
+| `master.persistence.dataSource` | Custom PVC data source | `{}` |
+| `master.persistence.existingClaim` | Use an existing PVC which must be created manually before bound | `""` |
+| `master.service.type` | Redis® master service type | `ClusterIP` |
+| `master.service.ports.redis` | Redis® master service port | `6379` |
+| `master.service.nodePorts.redis` | Node port for Redis® master | `""` |
+| `master.service.externalTrafficPolicy` | Redis® master service external traffic policy | `Cluster` |
+| `master.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
+| `master.service.internalTrafficPolicy` | Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` |
+| `master.service.clusterIP` | Redis® master service Cluster IP | `""` |
+| `master.service.loadBalancerIP` | Redis® master service Load Balancer IP | `""` |
+| `master.service.loadBalancerSourceRanges` | Redis® master service Load Balancer sources | `[]` |
+| `master.service.externalIPs` | Redis® master service External IPs | `[]` |
+| `master.service.annotations` | Additional custom annotations for Redis® master service | `{}` |
+| `master.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `master.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `master.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-master pods | `30` |
+| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
+| `master.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `master.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` |
+| `master.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
+
+
+### Redis® replicas configuration parameters
+
+| Name | Description | Value |
+| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------ |
+| `replica.replicaCount` | Number of Redis® replicas to deploy | `3` |
+| `replica.configuration` | Configuration for Redis® replicas nodes | `""` |
+| `replica.disableCommands` | Array with Redis® commands to disable on replicas nodes | `["FLUSHDB","FLUSHALL"]` |
+| `replica.command` | Override default container command (useful when using custom images) | `[]` |
+| `replica.args` | Override default container args (useful when using custom images) | `[]` |
+| `replica.preExecCmds` | Additional commands to run prior to starting Redis® replicas | `[]` |
+| `replica.extraFlags` | Array with additional command line flags for Redis® replicas | `[]` |
+| `replica.extraEnvVars` | Array with extra environment variables to add to Redis® replicas nodes | `[]` |
+| `replica.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® replicas nodes | `""` |
+| `replica.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® replicas nodes | `""` |
+| `replica.externalMaster.enabled` | Use external master for bootstrapping | `false` |
+| `replica.externalMaster.host` | External master host to bootstrap from | `""` |
+| `replica.externalMaster.port` | Port for Redis service external master host | `6379` |
+| `replica.containerPorts.redis` | Container port to open on Redis® replicas nodes | `6379` |
+| `replica.startupProbe.enabled` | Enable startupProbe on Redis® replicas nodes | `true` |
+| `replica.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
+| `replica.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `replica.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
+| `replica.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` |
+| `replica.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `replica.livenessProbe.enabled` | Enable livenessProbe on Redis® replicas nodes | `true` |
+| `replica.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` |
+| `replica.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
+| `replica.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `replica.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
+| `replica.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `replica.readinessProbe.enabled` | Enable readinessProbe on Redis® replicas nodes | `true` |
+| `replica.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` |
+| `replica.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
+| `replica.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
+| `replica.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
+| `replica.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `replica.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `replica.resources.limits` | The resources limits for the Redis® replicas containers | `{}` |
+| `replica.resources.requests` | The requested resources for the Redis® replicas containers | `{}` |
+| `replica.podSecurityContext.enabled` | Enabled Redis® replicas pods' Security Context | `true` |
+| `replica.podSecurityContext.fsGroup` | Set Redis® replicas pod's Security Context fsGroup | `1001` |
+| `replica.containerSecurityContext.enabled` | Enabled Redis® replicas containers' Security Context | `true` |
+| `replica.containerSecurityContext.runAsUser` | Set Redis® replicas containers' Security Context runAsUser | `1001` |
+| `replica.schedulerName` | Alternate scheduler for Redis® replicas pods | `""` |
+| `replica.updateStrategy.type` | Redis® replicas statefulset strategy type | `RollingUpdate` |
+| `replica.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
+| `replica.priorityClassName` | Redis® replicas pods' priorityClassName | `""` |
+| `replica.podManagementPolicy` | podManagementPolicy to manage scaling operation of Redis® replicas pods | `""` |
+| `replica.hostAliases` | Redis® replicas pods host aliases | `[]` |
+| `replica.podLabels` | Extra labels for Redis® replicas pods | `{}` |
+| `replica.podAnnotations` | Annotations for Redis® replicas pods | `{}` |
+| `replica.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis® replicas pods | `false` |
+| `replica.podAffinityPreset` | Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `replica.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `replica.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `replica.nodeAffinityPreset.key` | Node label key to match. Ignored if `replica.affinity` is set | `""` |
+| `replica.nodeAffinityPreset.values` | Node label values to match. Ignored if `replica.affinity` is set | `[]` |
+| `replica.affinity` | Affinity for Redis® replicas pods assignment | `{}` |
+| `replica.nodeSelector` | Node labels for Redis® replicas pods assignment | `{}` |
+| `replica.tolerations` | Tolerations for Redis® replicas pods assignment | `[]` |
+| `replica.topologySpreadConstraints` | Spread Constraints for Redis® replicas pod assignment | `[]` |
+| `replica.dnsPolicy` | DNS Policy for Redis® replica pods | `""` |
+| `replica.dnsConfig` | DNS Configuration for Redis® replica pods | `{}` |
+| `replica.lifecycleHooks` | for the Redis® replica container(s) to automate configuration before or after startup | `{}` |
+| `replica.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® replicas pod(s) | `[]` |
+| `replica.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) | `[]` |
+| `replica.sidecars` | Add additional sidecar containers to the Redis® replicas pod(s) | `[]` |
+| `replica.initContainers` | Add additional init containers to the Redis® replicas pod(s) | `[]` |
+| `replica.persistence.enabled` | Enable persistence on Redis® replicas nodes using Persistent Volume Claims | `true` |
+| `replica.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` |
+| `replica.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` |
+| `replica.persistence.path` | The path the volume will be mounted at on Redis® replicas containers | `/data` |
+| `replica.persistence.subPath` | The subdirectory of the volume to mount on Redis® replicas containers | `""` |
+| `replica.persistence.subPathExpr` | Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers | `""` |
+| `replica.persistence.storageClass` | Persistent Volume storage class | `""` |
+| `replica.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` |
+| `replica.persistence.size` | Persistent Volume size | `8Gi` |
+| `replica.persistence.annotations` | Additional custom annotations for the PVC | `{}` |
+| `replica.persistence.selector` | Additional labels to match for the PVC | `{}` |
+| `replica.persistence.dataSource` | Custom PVC data source | `{}` |
+| `replica.persistence.existingClaim` | Use an existing PVC which must be created manually before bound | `""` |
+| `replica.service.type` | Redis® replicas service type | `ClusterIP` |
+| `replica.service.ports.redis` | Redis® replicas service port | `6379` |
+| `replica.service.nodePorts.redis` | Node port for Redis® replicas | `""` |
+| `replica.service.externalTrafficPolicy` | Redis® replicas service external traffic policy | `Cluster` |
+| `replica.service.internalTrafficPolicy` | Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` |
+| `replica.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
+| `replica.service.clusterIP` | Redis® replicas service Cluster IP | `""` |
+| `replica.service.loadBalancerIP` | Redis® replicas service Load Balancer IP | `""` |
+| `replica.service.loadBalancerSourceRanges` | Redis® replicas service Load Balancer sources | `[]` |
+| `replica.service.annotations` | Additional custom annotations for Redis® replicas service | `{}` |
+| `replica.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `replica.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `replica.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-replicas pods | `30` |
+| `replica.autoscaling.enabled` | Enable replica autoscaling settings | `false` |
+| `replica.autoscaling.minReplicas` | Minimum replicas for the pod autoscaling | `1` |
+| `replica.autoscaling.maxReplicas` | Maximum replicas for the pod autoscaling | `11` |
+| `replica.autoscaling.targetCPU` | Percentage of CPU to consider when autoscaling | `""` |
+| `replica.autoscaling.targetMemory` | Percentage of Memory to consider when autoscaling | `""` |
+| `replica.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
+| `replica.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `replica.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` |
+| `replica.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
+
+
+### Redis® Sentinel configuration parameters
+
+| Name | Description | Value |
+| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
+| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` |
+| `sentinel.image.registry` | Redis® Sentinel image registry | `docker.io` |
+| `sentinel.image.repository` | Redis® Sentinel image repository | `bitnami/redis-sentinel` |
+| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.7-debian-11-r10` |
+| `sentinel.image.digest` | Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `sentinel.image.pullPolicy` | Redis® Sentinel image pull policy | `IfNotPresent` |
+| `sentinel.image.pullSecrets` | Redis® Sentinel image pull secrets | `[]` |
+| `sentinel.image.debug` | Enable image debug mode | `false` |
+| `sentinel.masterSet` | Master set name | `mymaster` |
+| `sentinel.quorum` | Sentinel Quorum | `2` |
+| `sentinel.getMasterTimeout` | Amount of time to allow before get_sentinel_master_info() times out. | `220` |
+| `sentinel.automateClusterRecovery` | Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. | `false` |
+| `sentinel.redisShutdownWaitFailover` | Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container). | `true` |
+| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis® node is down | `60000` |
+| `sentinel.failoverTimeout` | Timeout for performing an election failover | `180000` |
+| `sentinel.parallelSyncs` | Number of replicas that can be reconfigured in parallel to use the new master after a failover | `1` |
+| `sentinel.configuration` | Configuration for Redis® Sentinel nodes | `""` |
+| `sentinel.command` | Override default container command (useful when using custom images) | `[]` |
+| `sentinel.args` | Override default container args (useful when using custom images) | `[]` |
+| `sentinel.preExecCmds` | Additional commands to run prior to starting Redis® Sentinel | `[]` |
+| `sentinel.extraEnvVars` | Array with extra environment variables to add to Redis® Sentinel nodes | `[]` |
+| `sentinel.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes | `""` |
+| `sentinel.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® Sentinel nodes | `""` |
+| `sentinel.externalMaster.enabled` | Use external master for bootstrapping | `false` |
+| `sentinel.externalMaster.host` | External master host to bootstrap from | `""` |
+| `sentinel.externalMaster.port` | Port for Redis service external master host | `6379` |
+| `sentinel.containerPorts.sentinel` | Container port to open on Redis® Sentinel nodes | `26379` |
+| `sentinel.startupProbe.enabled` | Enable startupProbe on Redis® Sentinel nodes | `true` |
+| `sentinel.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
+| `sentinel.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `sentinel.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
+| `sentinel.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` |
+| `sentinel.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `sentinel.livenessProbe.enabled` | Enable livenessProbe on Redis® Sentinel nodes | `true` |
+| `sentinel.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` |
+| `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
+| `sentinel.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
+| `sentinel.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `sentinel.readinessProbe.enabled` | Enable readinessProbe on Redis® Sentinel nodes | `true` |
+| `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` |
+| `sentinel.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
+| `sentinel.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
+| `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
+| `sentinel.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `sentinel.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `sentinel.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `sentinel.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `sentinel.persistence.enabled` | Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) | `false` |
+| `sentinel.persistence.storageClass` | Persistent Volume storage class | `""` |
+| `sentinel.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` |
+| `sentinel.persistence.size` | Persistent Volume size | `100Mi` |
+| `sentinel.persistence.annotations` | Additional custom annotations for the PVC | `{}` |
+| `sentinel.persistence.selector` | Additional labels to match for the PVC | `{}` |
+| `sentinel.persistence.dataSource` | Custom PVC data source | `{}` |
+| `sentinel.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` |
+| `sentinel.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` |
+| `sentinel.resources.limits` | The resources limits for the Redis® Sentinel containers | `{}` |
+| `sentinel.resources.requests` | The requested resources for the Redis® Sentinel containers | `{}` |
+| `sentinel.containerSecurityContext.enabled` | Enabled Redis® Sentinel containers' Security Context | `true` |
+| `sentinel.containerSecurityContext.runAsUser` | Set Redis® Sentinel containers' Security Context runAsUser | `1001` |
+| `sentinel.lifecycleHooks` | for the Redis® sentinel container(s) to automate configuration before or after startup | `{}` |
+| `sentinel.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® Sentinel | `[]` |
+| `sentinel.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) | `[]` |
+| `sentinel.service.type` | Redis® Sentinel service type | `ClusterIP` |
+| `sentinel.service.ports.redis` | Redis® service port for Redis® | `6379` |
+| `sentinel.service.ports.sentinel` | Redis® service port for Redis® Sentinel | `26379` |
+| `sentinel.service.nodePorts.redis` | Node port for Redis® | `""` |
+| `sentinel.service.nodePorts.sentinel` | Node port for Sentinel | `""` |
+| `sentinel.service.externalTrafficPolicy` | Redis® Sentinel service external traffic policy | `Cluster` |
+| `sentinel.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
+| `sentinel.service.clusterIP` | Redis® Sentinel service Cluster IP | `""` |
+| `sentinel.service.loadBalancerIP` | Redis® Sentinel service Load Balancer IP | `""` |
+| `sentinel.service.loadBalancerSourceRanges` | Redis® Sentinel service Load Balancer sources | `[]` |
+| `sentinel.service.annotations` | Additional custom annotations for Redis® Sentinel service | `{}` |
+| `sentinel.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `sentinel.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `sentinel.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-node pods | `30` |
+
+
+### Other Parameters
+
+| Name | Description | Value |
+| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
+| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` |
+| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` |
+| `podSecurityPolicy.enabled` | Enable PodSecurityPolicy's RBAC rules | `false` |
+| `rbac.create` | Specifies whether RBAC resources should be created | `false` |
+| `rbac.rules` | Custom RBAC rules to set | `[]` |
+| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` |
+| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
+| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` |
+| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` |
+| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `""` |
+| `tls.enabled` | Enable TLS traffic | `false` |
+| `tls.authClients` | Require clients to authenticate | `true` |
+| `tls.autoGenerated` | Enable autogenerated certificates | `false` |
+| `tls.existingSecret` | The name of the existing secret that contains the TLS certificates | `""` |
+| `tls.certificatesSecret` | DEPRECATED. Use existingSecret instead. | `""` |
+| `tls.certFilename` | Certificate filename | `""` |
+| `tls.certKeyFilename` | Certificate Key filename | `""` |
+| `tls.certCAFilename` | CA Certificate filename | `""` |
+| `tls.dhParamsFilename` | File containing DH params (in order to support DH based ciphers) | `""` |
+
+
+### Metrics Parameters
+
+| Name | Description | Value |
+| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------ |
+| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` |
+| `metrics.image.registry` | Redis® Exporter image registry | `docker.io` |
+| `metrics.image.repository` | Redis® Exporter image repository | `bitnami/redis-exporter` |
+| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.45.0-debian-11-r26` |
+| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` |
+| `metrics.startupProbe.enabled` | Enable startupProbe on Redis® replicas nodes | `false` |
+| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
+| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
+| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` |
+| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `metrics.livenessProbe.enabled` | Enable livenessProbe on Redis® replicas nodes | `true` |
+| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` |
+| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
+| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `metrics.readinessProbe.enabled` | Enable readinessProbe on Redis® replicas nodes | `true` |
+| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
+| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
+| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `metrics.command` | Override default metrics container init command (useful when using custom images) | `[]` |
+| `metrics.redisTargetHost` | A way to specify an alternative Redis® hostname | `localhost` |
+| `metrics.extraArgs` | Extra arguments for Redis® exporter, for example: | `{}` |
+| `metrics.extraEnvVars` | Array with extra environment variables to add to Redis® exporter | `[]` |
+| `metrics.containerSecurityContext.enabled` | Enabled Redis® exporter containers' Security Context | `true` |
+| `metrics.containerSecurityContext.runAsUser` | Set Redis® exporter containers' Security Context runAsUser | `1001` |
+| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® metrics sidecar | `[]` |
+| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar | `[]` |
+| `metrics.resources.limits` | The resources limits for the Redis® exporter container | `{}` |
+| `metrics.resources.requests` | The requested resources for the Redis® exporter container | `{}` |
+| `metrics.podLabels` | Extra labels for Redis® exporter pods | `{}` |
+| `metrics.podAnnotations` | Annotations for Redis® exporter pods | `{}` |
+| `metrics.service.type` | Redis® exporter service type | `ClusterIP` |
+| `metrics.service.port` | Redis® exporter service port | `9121` |
+| `metrics.service.externalTrafficPolicy` | Redis® exporter service external traffic policy | `Cluster` |
+| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
+| `metrics.service.loadBalancerIP` | Redis® exporter service Load Balancer IP | `""` |
+| `metrics.service.loadBalancerSourceRanges` | Redis® exporter service Load Balancer sources | `[]` |
+| `metrics.service.annotations` | Additional custom annotations for Redis® exporter service | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` |
+| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `""` |
+| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` |
+| `metrics.serviceMonitor.scrapeTimeout` | The timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.relabellings` | Metrics RelabelConfigs to apply to samples before scraping. | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | Metrics RelabelConfigs to apply to samples before ingestion. | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.podTargetLabels` | Labels from the Kubernetes pod to be transferred to the created metrics | `[]` |
+| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` |
+| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `""` |
+| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` |
+| `metrics.prometheusRule.rules` | Custom Prometheus rules | `[]` |
+
+
+### Init Container Parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
+| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
+| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r72` |
+| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
+| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` |
+| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` |
+| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` |
+| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |
+| `sysctl.image.registry` | Bitnami Shell image registry | `docker.io` |
+| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
+| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r72` |
+| `sysctl.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
+| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
+| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` |
+| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` |
+| `sysctl.resources.limits` | The resources limits for the init container | `{}` |
+| `sysctl.resources.requests` | The requested resources for the init container | `{}` |
+
+
+### useExternalDNS Parameters
+
+| Name | Description | Value |
+| -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- |
+| `useExternalDNS.enabled` | Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. | `false` |
+| `useExternalDNS.additionalAnnotations` | Extra annotations to be utilized when `external-dns` is enabled. | `{}` |
+| `useExternalDNS.annotationKey` | The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. | `external-dns.alpha.kubernetes.io/` |
+| `useExternalDNS.suffix` | The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. | `""` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+ --set auth.password=secretpassword \
+ my-repo/redis
+```
+
+The above command sets the Redis® server password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml my-repo/redis
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Use a different Redis® version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/redis/configuration/change-image-version/).
+
+### Bootstrapping with an External Cluster
+
+This chart is equipped with the ability to bring online a set of Pods that connect to an existing Redis deployment that lies outside of Kubernetes. This effectively creates a hybrid Redis Deployment where both Pods in Kubernetes and Instances such as Virtual Machines can partake in a single Redis Deployment. This is helpful in situations where one may be migrating Redis from Virtual Machines into Kubernetes, for example. To take advantage of this, use the following as an example configuration:
+
+```yaml
+replica:
+ externalMaster:
+ enabled: true
+ host: external-redis-0.internal
+sentinel:
+ externalMaster:
+ enabled: true
+ host: external-redis-0.internal
+```
+
+:warning: This is currently limited to clusters in which Sentinel and Redis run on the same node! :warning:
+
+Please also note that the external sentinel must be listening on port `26379`, and this is currently not configurable.
+
+Once the Kubernetes Redis Deployment is online and confirmed to be working with the existing cluster, the configuration can then be removed and the cluster will remain connected.
+
+### External DNS
+
+This chart is equipped to allow leveraging the ExternalDNS project. Doing so will enable ExternalDNS to publish the FQDN for each instance, in the format of `<pod-name>.<release-name>.<dns-suffix>`.
+For example, when using the following configuration:
+
+```yaml
+useExternalDNS:
+ enabled: true
+ suffix: prod.example.org
+ additionalAnnotations:
+ ttl: 10
+```
+
+On a cluster where the name of the Helm release is `a`, the hostname of a Pod is generated as: `a-redis-node-0.a-redis.prod.example.org`. The IP of that FQDN will match that of the associated Pod. This modifies the following parameters of the Redis/Sentinel configuration using this new FQDN:
+
+* `replica-announce-ip`
+* `known-sentinel`
+* `known-replica`
+* `announce-ip`
+
+:warning: This requires a working installation of `external-dns` to be fully functional. :warning:
+
+See the [official ExternalDNS documentation](https://github.com/kubernetes-sigs/external-dns) for additional configuration options.
+
+### Cluster topologies
+
+#### Default: Master-Replicas
+
+When installing the chart with `architecture=replication`, it will deploy a Redis® master StatefulSet and a Redis® replicas StatefulSet. The replicas will be read-replicas of the master. Two services will be exposed:
+
+- Redis® Master service: Points to the master, where read-write operations can be performed
+- Redis® Replicas service: Points to the replicas, where only read operations are allowed by default.
+
+In case the master crashes, the replicas will wait until the master node is respawned again by the Kubernetes Controller Manager.
+
+#### Standalone
+
+When installing the chart with `architecture=standalone`, it will deploy a standalone Redis® StatefulSet. A single service will be exposed:
+
+- Redis® Master service: Points to the master, where read-write operations can be performed
+
+#### Master-Replicas with Sentinel
+
+When installing the chart with `architecture=replication` and `sentinel.enabled=true`, it will deploy a Redis® master StatefulSet (only one master allowed) and a Redis® replicas StatefulSet. In this case, the pods will contain an extra container with Redis® Sentinel. This container will form a cluster of Redis® Sentinel nodes, which will promote a new master in case the actual one fails.
+
+On graceful termination of the Redis® master pod, a failover of the master is initiated to promote a new master. The Redis® Sentinel container in this pod will wait for the failover to occur before terminating. If `sentinel.redisShutdownWaitFailover=true` is set (the default), the Redis® container will wait for the failover as well before terminating. This increases availability for reads during failover, but may cause stale reads until all clients have switched to the new master.
+
+In addition to this, only one service is exposed:
+
+- Redis® service: Exposes port 6379 for Redis® read-only operations and port 26379 for accessing Redis® Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis® Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```
+SENTINEL get-master-addr-by-name <name of your MasterSet. e.g: mymaster>
+```
+
+This command will return the address of the current master, which can be accessed from inside the cluster.
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+`master.count` greater than `1` is not designed for use when `sentinel.enabled=true`.
+
+### Multiple masters (experimental)
+
+When `master.count` is greater than `1`, special care must be taken to create a consistent setup.
+
+An example of use case is the creation of a redundant set of standalone masters or master-replicas per Kubernetes node where you must ensure:
+- No more than `1` master can be deployed per Kubernetes node
+- Replicas and writers can only see the single master of their own Kubernetes node
+
+One way of achieving this is by setting `master.service.internalTrafficPolicy=Local` in combination with a `master.affinity.podAntiAffinity` spec to never schedule more than one master per Kubernetes node.
+
+It's recommended to only change `master.count` if you know what you are doing.
+`master.count` greater than `1` is not designed for use when `sentinel.enabled=true`.
+
+### Using a password file
+
+To use a password file for Redis® you need to create a secret containing the password and then deploy the chart using that secret.
+
+Refer to the chart documentation for more information on [using a password file for Redis®](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/use-password-file/).
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.existingSecret`: Name of the secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+- `tls.certCAFilename`: CA Certificate filename. No defaults.
+
+Refer to the chart documentation for more information on [creating the secret and a TLS deployment example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-tls/).
+
+### Metrics
+
+The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS option to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example:
+
+You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification, or provide the following values under `metrics.extraArgs` for TLS client authentication:
+
+```console
+tls-client-key-file
+tls-client-cert-file
+tls-ca-cert-file
+```
+
+### Host Kernel Settings
+
+Redis® may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+
+Refer to the chart documentation for more information on [configuring host kernel settings with an example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/configure-kernel-settings/).
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```console
+$ helm install my-release --set master.persistence.existingClaim=PVC_NAME my-repo/redis
+```
+
+## Backup and restore
+
+Refer to the chart documentation for more information on [backing up and restoring Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/backup-restore/).
+
+## NetworkPolicy
+
+To enable network policy for Redis®, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+Refer to the chart documentation for more information on [enabling the network policy in Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-network-policy/).
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can make use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
+
+### To 17.0.0
+
+This major version updates the Redis® docker image version used from `6.2` to `7.0`, the new stable version. There are no major changes in the chart, but we recommend checking the [Redis® 7.0 release notes](https://raw.githubusercontent.com/redis/redis/7.0/00-RELEASENOTES) before upgrading.
+
+### To 16.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Bitnami charts repository.
+
+Affected values:
+- `master.service.port` renamed as `master.service.ports.redis`.
+- `master.service.nodePort` renamed as `master.service.nodePorts.redis`.
+- `replica.service.port` renamed as `replica.service.ports.redis`.
+- `replica.service.nodePort` renamed as `replica.service.nodePorts.redis`.
+- `sentinel.service.port` renamed as `sentinel.service.ports.redis`.
+- `sentinel.service.sentinelPort` renamed as `sentinel.service.ports.sentinel`.
+- `master.containerPort` renamed as `master.containerPorts.redis`.
+- `replica.containerPort` renamed as `replica.containerPorts.redis`.
+- `sentinel.containerPort` renamed as `sentinel.containerPorts.sentinel`.
+- `master.spreadConstraints` renamed as `master.topologySpreadConstraints`
+- `replica.spreadConstraints` renamed as `replica.topologySpreadConstraints`
+
+### To 15.0.0
+
+The parameter to enable the usage of StaticIDs was removed. The behavior is to [always use StaticIDs](https://github.com/bitnami/charts/pull/7278).
+
+### To 14.8.0
+
+The Redis® sentinel exporter was removed in this version because the upstream project was deprecated. The regular Redis® exporter is included in the sentinel scenario as usual.
+
+### To 14.0.0
+
+- Several parameters were renamed or disappeared in favor of new ones on this major version:
+ - The term *slave* has been replaced by the term *replica*. Therefore, parameters prefixed with `slave` are now prefixed with `replicas`.
+ - Credentials parameter are reorganized under the `auth` parameter.
+ - `cluster.enabled` parameter is deprecated in favor of `architecture` parameter that accepts two values: `standalone` and `replication`.
+ - `securityContext.*` is deprecated in favor of `XXX.podSecurityContext` and `XXX.containerSecurityContext`.
+ - `sentinel.metrics.*` parameters are deprecated in favor of `metrics.sentinel.*` ones.
+- New parameters to add custom command, environment variables, sidecars, init containers, etc. were added.
+- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
+- values.yaml metadata was adapted to follow the format supported by [Readme Generator for Helm](https://github.com/bitnami-labs/readme-generator-for-helm).
+
+Consequences:
+
+Backwards compatibility is not guaranteed. To upgrade to `14.0.0`, install a new release of the Redis® chart, and migrate the data from your previous release. You have 2 alternatives to do so:
+
+- Create a backup of the database, and restore it on the new release as explained in the [Backup and restore](#backup-and-restore) section.
+- Reuse the PVC used to hold the master data on your previous release. To do so, use the `master.persistence.existingClaim` parameter. The following example assumes that the release name is `redis`:
+
+```console
+$ helm install redis my-repo/redis --set auth.password=[PASSWORD] --set master.persistence.existingClaim=[EXISTING_PVC]
+```
+
+| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[PASSWORD]_ with the password used in your previous release.
+
+### To 13.0.0
+
+This major version updates the Redis® docker image version used from `6.0` to `6.2`, the new stable version. There are no major changes in the chart and there shouldn't be any breaking changes in it as `6.2` is basically a stricter superset of `6.0`. For more information, please refer to [Redis® 6.2 release notes](https://raw.githubusercontent.com/redis/redis/6.2/00-RELEASENOTES).
+
+### To 12.3.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 11.0.0
+
+When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml`
+
+### To 9.0.0
+
+The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis® exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
+
+### To 7.0.0
+
+In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all.
+
+This version also allows enabling Redis® Sentinel containers inside of the Redis® Pods (feature disabled by default). In case the master crashes, a new Redis® node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster.
+
+### To 11.0.0
+
+When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version.
+
+### To 10.0.0
+
+For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases:
+
+- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced.
+- Where redis clients need to be updated to support sentinel authentication.
+
+If using a master/slave topology, or with `usePassword: false`, no action is required.
+
+### To 8.0.18
+
+For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details.
+
+### To 7.0.0
+
+This version causes a change in the Redis® Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done:
+
+- Recommended: Create a clone of the Redis® Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC.
+
+ ```
+ $ helm install my-release my-repo/redis --set persistence.existingClaim=<NEW PVC>
+ ```
+
+- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis® Master StatefulSet. As a consequence, the following commands can be done to upgrade the release
+
+ ```
+ $ helm delete --purge <RELEASE>
+ $ helm install <RELEASE> my-repo/redis
+ ```
+
+Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters.
+
+Some values have changed as well:
+
+- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves)
+- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves)
+
+By default, the upgrade will not change the cluster topology. In case you want to use Redis® Sentinel, you must explicitly set `sentinel.enabled` to `true`.
+
+### To 6.0.0
+
+Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`.
+
+### To 5.0.0
+
+The default image in this release may be switched out for any image containing the `redis-server`
+and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command`
+must be specified.
+
+#### Breaking changes
+
+- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`.
+- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values.
+- `master.persistence.path` now defaults to `/data`.
+
+### To 4.0.0
+
+This version removes the `chart` label from the `spec.selector.matchLabels`
+which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently
+added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726.
+
+It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set.
+
+Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable.
+
+In order to upgrade, delete the Redis® StatefulSet before upgrading:
+
+```console
+$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master
+```
+
+And edit the Redis® slave (and metrics if enabled) deployment:
+
+```console
+$ kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+```
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/charts/penpot/charts/redis/charts/common/.helmignore b/charts/penpot/charts/redis/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/penpot/charts/redis/charts/common/Chart.yaml b/charts/penpot/charts/redis/charts/common/Chart.yaml
new file mode 100644
index 0000000..f9ba944
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+ category: Infrastructure
+apiVersion: v2
+appVersion: 2.2.2
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+ This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/main/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+ url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 2.2.2
diff --git a/charts/penpot/charts/redis/charts/common/README.md b/charts/penpot/charts/redis/charts/common/README.md
new file mode 100644
index 0000000..ec43a5f
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/README.md
@@ -0,0 +1,351 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+ - name: common
+ version: 1.x.x
+ repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.names.fullname" . }}
+data:
+ myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following table lists the helpers available in the library which are scoped in different sections.
+
+### Affinities
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------|------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
+| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
+| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` |
+
+### Capabilities
+
+| Helper identifier | Description | Expected Input |
+|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context |
+| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
+| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
+| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context |
+| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context |
+| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context |
+| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context |
+| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context |
+| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context |
+
+### Errors
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
+| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
+
+### Images
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------|
+| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` |
+
+### Ingress
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context |
+| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context |
+| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` |
+
+### Labels
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------|-----------------------------------------------------------------------------|-------------------|
+| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------|-----------------------------------------------------------------------|-------------------|
+| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
+| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value that should be rendered as a template; context is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. |
+| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. |
+| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. |
+| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. |
+| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. |
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+|------------------------------|----------------------------------|------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+ type: string
+ description: Docker registry where the image is located
+ example: docker.io
+
+repository:
+ type: string
+ description: Repository and image name
+ example: bitnami/nginx
+
+tag:
+ type: string
+ description: image tag
+ example: 1.16.1-debian-10-r63
+
+pullPolicy:
+ type: string
+ description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+ type: array
+ items:
+ type: string
+ description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+ type: boolean
+ description: Set to true if you would like to see extra information on logs
+ example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+ type: boolean
+ description: Whether enable persistence.
+ example: true
+
+storageClass:
+ type: string
+ description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+ example: "-"
+
+accessMode:
+ type: string
+ description: Access mode for the Persistent Volume Storage.
+ example: ReadWriteOnce
+
+size:
+ type: string
+ description: Size the Persistent Volume Storage.
+ example: 8Gi
+
+path:
+ type: string
+ description: Path to be persisted.
+ example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+ type: string
+ description: Name of the existing secret.
+ example: mySecret
+keyMapping:
+ description: Mapping between the expected key name and the name of the key in the existing secret.
+ type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+# password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+ password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+ env:
+ - name: PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+ key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+ password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+ export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+ export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/charts/penpot/charts/redis/charts/common/templates/_affinities.tpl b/charts/penpot/charts/redis/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..81902a6
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_affinities.tpl
@@ -0,0 +1,106 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" . -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_capabilities.tpl b/charts/penpot/charts/redis/charts/common/templates/_capabilities.tpl
new file mode 100644
index 0000000..9d9b760
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_capabilities.tpl
@@ -0,0 +1,154 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+ {{- if .Values.global.kubeVersion }}
+ {{- .Values.global.kubeVersion -}}
+ {{- else }}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+ {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_errors.tpl b/charts/penpot/charts/redis/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+ - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+ - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+ {{- $validationErrors := join "" .validationErrors -}}
+ {{- if and $validationErrors .context.Release.IsUpgrade -}}
+ {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+ {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+ {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+ {{- $errorString = print $errorString "\n%s" -}}
+ {{- printf $errorString $validationErrors | fail -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_images.tpl b/charts/penpot/charts/redis/charts/common/templates/_images.tpl
new file mode 100644
index 0000000..46c659e
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_images.tpl
@@ -0,0 +1,76 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+ {{- if .global.imageRegistry }}
+ {{- $registryName = .global.imageRegistry -}}
+ {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+ {{- $pullSecrets := list }}
+
+ {{- if .global }}
+ {{- range .global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+ {{- $pullSecrets := list }}
+ {{- $context := .context }}
+
+ {{- if $context.Values.global }}
+ {{- range $context.Values.global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_ingress.tpl b/charts/penpot/charts/redis/charts/common/templates/_ingress.tpl
new file mode 100644
index 0000000..831da9c
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_ingress.tpl
@@ -0,0 +1,68 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+ - serviceName - String. Name of an existing service backend
+ - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+ name: {{ .serviceName }}
+ port:
+ {{- if typeIs "string" .servicePort }}
+ name: {{ .servicePort }}
+ {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+ number: {{ .servicePort | int }}
+ {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_labels.tpl b/charts/penpot/charts/redis/charts/common/templates/_labels.tpl
new file mode 100644
index 0000000..252066c
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_labels.tpl
@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_names.tpl b/charts/penpot/charts/redis/charts/common/templates/_names.tpl
new file mode 100644
index 0000000..617a234
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_names.tpl
@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_secrets.tpl b/charts/penpot/charts/redis/charts/common/templates/_secrets.tpl
new file mode 100644
index 0000000..a1708b2
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_secrets.tpl
@@ -0,0 +1,165 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+ {{- if not (typeIs "string" .existingSecret) -}}
+ {{- if .existingSecret.keyMapping -}}
+ {{- $key = index .existingSecret.keyMapping $.key -}}
+ {{- end -}}
+ {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - length - int - Optional - Length of the generated random password.
+ - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+ - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+ - context - Context - Required - Parent context.
+
+The order in which this function returns a secret password:
+ 1. Already existing 'Secret' resource
+ (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+ 2. Password provided via the values.yaml
+ (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+ 3. Randomly generated secret password
+ (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+ {{- if hasKey $secretData .key }}
+ {{- $password = index $secretData .key | quote }}
+ {{- else }}
+ {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+ {{- end -}}
+{{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+ {{- if .context.Values.enabled }}
+ {{- $subchart = $chartName }}
+ {{- end -}}
+
+ {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+ {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+ {{- $passwordValidationErrors := list $requiredPasswordError -}}
+ {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+ {{- if .strong }}
+ {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+ {{- $password = randAscii $passwordLength }}
+ {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+ {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+ {{- else }}
+ {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+ {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - defaultValue - String - Required - The default value to use (base64-encoded and returned) when the secret or key does not already exist.
+ - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+ {{- $value = index $secretData .key -}}
+{{- else -}}
+ {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_storage.tpl b/charts/penpot/charts/redis/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..60e2a84
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_storage.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+ {{- if .global.storageClass -}}
+ {{- $storageClass = .global.storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+ {{- if (eq "-" $storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" $storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_tplvalues.tpl b/charts/penpot/charts/redis/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..2db1668
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_utils.tpl b/charts/penpot/charts/redis/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..b1ead50
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_utils.tpl
@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+ {{- $fieldNameSplit := splitList "-" .field -}}
+ {{- $upperCaseFieldNameSplit := list -}}
+
+ {{- range $fieldNameSplit -}}
+ {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+ {{- end -}}
+
+ {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+ {{- if not $latestObj -}}
+ {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+ {{- end -}}
+ {{- $value = ( index $latestObj . ) -}}
+ {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+ {{- if $value -}}
+ {{- $key = . }}
+ {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/_warnings.tpl b/charts/penpot/charts/redis/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..ae10fa4
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/_warnings.tpl
@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/validations/_cassandra.tpl b/charts/penpot/charts/redis/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..ded1ae3
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+ {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+ {{- $enabled := include "common.cassandra.values.enabled" . -}}
+ {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+ {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.dbUser.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.cassandra.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+ {{- if .subchart -}}
+ cassandra.dbUser
+ {{- else -}}
+ dbUser
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/validations/_mariadb.tpl b/charts/penpot/charts/redis/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 0000000..b6906ff
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+ {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mariadb.values.enabled" . -}}
+ {{- $architecture := include "common.mariadb.values.architecture" . -}}
+ {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mariadb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mariadb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/validations/_mongodb.tpl b/charts/penpot/charts/redis/charts/common/templates/validations/_mongodb.tpl
new file mode 100644
index 0000000..f820ec1
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/validations/_mongodb.tpl
@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret"
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+ {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mongodb.values.enabled" . -}}
+ {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+ {{- $architecture := include "common.mongodb.values.architecture" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+ {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+ {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+ {{- if and $valueUsername $valueDatabase -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replicaset") -}}
+ {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mongodb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mongodb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/validations/_mysql.tpl b/charts/penpot/charts/redis/charts/common/templates/validations/_mysql.tpl
new file mode 100644
index 0000000..74472a0
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/validations/_mysql.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+ {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mysql.values.enabled" . -}}
+ {{- $architecture := include "common.mysql.values.architecture" . -}}
+ {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mysql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+ {{- if .subchart -}}
+ mysql.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/validations/_postgresql.tpl b/charts/penpot/charts/redis/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+ {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+ {{- $enabled := include "common.postgresql.values.enabled" . -}}
+ {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+ {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+ {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+ {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+ {{- if (eq $enabledReplication "true") -}}
+ {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+ - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+ {{- if .context.Values.global -}}
+ {{- if .context.Values.global.postgresql -}}
+ {{- index .context.Values.global.postgresql .key | quote -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+ {{- if .subchart -}}
+ {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+ {{- else -}}
+ {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+ {{- if not $globalValue -}}
+ {{- if .subchart -}}
+ postgresql.postgresqlPassword
+ {{- else -}}
+ postgresqlPassword
+ {{- end -}}
+ {{- else -}}
+ global.postgresql.postgresqlPassword
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+ {{- else -}}
+ {{- printf "%v" .context.Values.replication.enabled -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+ {{- if .subchart -}}
+ postgresql.replication.password
+ {{- else -}}
+ replication.password
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/validations/_redis.tpl b/charts/penpot/charts/redis/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..dcccfc1
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+ {{- $enabled := include "common.redis.values.enabled" . -}}
+ {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+ {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+ {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+ {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+ {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+ {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+ {{- if eq $useAuth "true" -}}
+ {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.redis.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+ {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart's includes the standarizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+ {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+ {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+ {{- if $standarizedAuthValues -}}
+ {{- true -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/templates/validations/_validations.tpl b/charts/penpot/charts/redis/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+ {{- range .required -}}
+ {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+ - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+ {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+ {{- if not $value -}}
+ {{- $varname := "my-value" -}}
+ {{- $getCurrentValue := "" -}}
+ {{- if and .secret .field -}}
+ {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+ {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+ {{- end -}}
+ {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/charts/common/values.yaml b/charts/penpot/charts/redis/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/charts/penpot/charts/redis/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/charts/penpot/charts/redis/img/redis-cluster-topology.png b/charts/penpot/charts/redis/img/redis-cluster-topology.png
new file mode 100644
index 0000000..f0a02a9
--- /dev/null
+++ b/charts/penpot/charts/redis/img/redis-cluster-topology.png
Binary files differ
diff --git a/charts/penpot/charts/redis/img/redis-topology.png b/charts/penpot/charts/redis/img/redis-topology.png
new file mode 100644
index 0000000..3f5280f
--- /dev/null
+++ b/charts/penpot/charts/redis/img/redis-topology.png
Binary files differ
diff --git a/charts/penpot/charts/redis/templates/NOTES.txt b/charts/penpot/charts/redis/templates/NOTES.txt
new file mode 100644
index 0000000..2623ade
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/NOTES.txt
@@ -0,0 +1,191 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+ kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+ kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+For Redis:
+
+ /opt/bitnami/scripts/redis/entrypoint.sh /opt/bitnami/scripts/redis/run.sh
+
+{{- if .Values.sentinel.enabled }}
+
+For Redis Sentinel:
+
+ /opt/bitnami/scripts/redis-sentinel/entrypoint.sh /opt/bitnami/scripts/redis-sentinel/run.sh
+
+{{- end }}
+{{- else }}
+
+{{- if contains .Values.master.service.type "LoadBalancer" }}
+{{- if not .Values.auth.enabled }}
+{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+ By specifying "master.service.type=LoadBalancer" and "auth.enabled=false" you have
+ most likely exposed the Redis® service externally without any authentication
+ mechanism.
+
+ For security reasons, we strongly suggest that you switch to "ClusterIP" or
+ "NodePort". As alternative, you can also switch to "auth.enabled=true"
+ providing a valid password on "password" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- if eq .Values.architecture "replication" }}
+{{- if .Values.sentinel.enabled }}
+
+Redis® can be accessed via port {{ .Values.sentinel.service.ports.redis }} on the following DNS name from within your cluster:
+
+ {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations
+
+For read/write operations, first access the Redis® Sentinel cluster, which is available in port {{ .Values.sentinel.service.ports.sentinel }} using the same domain name above.
+
+{{- else }}
+
+Redis® can be accessed on the following DNS names from within your cluster:
+
+ {{ printf "%s-master.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read/write operations (port {{ .Values.master.service.ports.redis }})
+ {{ printf "%s-replicas.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read-only operations (port {{ .Values.replica.service.ports.redis }})
+
+{{- end }}
+{{- else }}
+
+Redis® can be accessed via port {{ .Values.master.service.ports.redis }} on the following DNS name from within your cluster:
+
+ {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+{{- end }}
+
+{{ if .Values.auth.enabled }}
+
+To get your password run:
+
+ export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 -d)
+
+{{- end }}
+
+To connect to your Redis® server:
+
+1. Run a Redis® pod that you can use as a client:
+
+ kubectl run --namespace {{ .Release.Namespace }} redis-client --restart='Never' {{ if .Values.auth.enabled }} --env REDIS_PASSWORD=$REDIS_PASSWORD {{ end }} --image {{ template "redis.image" . }} --command -- sleep infinity
+
+{{- if .Values.tls.enabled }}
+
+ Copy your TLS certificates to the pod:
+
+ kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert redis-client:/tmp/client.cert
+ kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key redis-client:/tmp/client.key
+ kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert redis-client:/tmp/CA.cert
+
+{{- end }}
+
+ Use the following command to attach to the pod:
+
+ kubectl exec --tty -i redis-client \
+ {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "common.names.fullname" . }}-client=true" \{{- end }}
+ --namespace {{ .Release.Namespace }} -- bash
+
+2. Connect using the Redis® CLI:
+
+{{- if eq .Values.architecture "replication" }}
+ {{- if .Values.sentinel.enabled }}
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.redis }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.sentinel }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access
+ {{- else }}
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-master" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-replicas" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+ {{- end }}
+{{- else }}
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }}-master{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+{{- end }}
+
+{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+
+Note: Since NetworkPolicy is enabled, only pods with label {{ template "common.names.fullname" . }}-client=true" will be able to connect to redis.
+
+{{- else }}
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }}
+{{- if contains "NodePort" .Values.sentinel.service.type }}
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "LoadBalancer" .Values.sentinel.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "ClusterIP" .Values.sentinel.service.type }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "common.names.fullname" . }} {{ .Values.sentinel.service.ports.redis }}:{{ .Values.sentinel.service.ports.redis }} &
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- end }}
+{{- else }}
+{{- if contains "NodePort" .Values.master.service.type }}
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ printf "%s-master" (include "common.names.fullname" .) }})
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "LoadBalancer" .Values.master.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ printf "%s-master" (include "common.names.fullname" .) }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "ClusterIP" .Values.master.service.type }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-master" (include "common.names.fullname" .) }} {{ .Values.master.service.ports.redis }}:{{ .Values.master.service.ports.redis }} &
+ {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- end }}
+{{- end }}
+
+{{- end }}
+{{- end }}
+{{- include "redis.checkRollingTags" . }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- include "common.warnings.rollingTag" .Values.sysctl.image }}
+{{- include "redis.validateValues" . }}
+
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Release.IsUpgrade ) }}
+{{- if $.Values.sentinel.service.nodePorts.sentinel }}
+No need to upgrade, ports and nodeports have been set from values
+{{- else }}
+#!#!#!#!#!#!#!# IMPORTANT #!#!#!#!#!#!#!#
+YOU NEED TO PERFORM AN UPGRADE FOR THE SERVICES AND WORKLOAD TO BE CREATED
+{{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/_helpers.tpl b/charts/penpot/charts/redis/templates/_helpers.tpl
new file mode 100644
index 0000000..90064e8
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/_helpers.tpl
@@ -0,0 +1,321 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the proper Redis image name
+*/}}
+{{- define "redis.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Redis Sentinel image name
+*/}}
+{{- define "redis.sentinel.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.sentinel.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the metrics image)
+*/}}
+{{- define "redis.metrics.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "redis.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return sysctl image
+*/}}
+{{- define "redis.sysctl.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.sysctl.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "redis.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.sentinel.image .Values.metrics.image .Values.volumePermissions.image .Values.sysctl.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "networkPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiGroup for PodSecurityPolicy.
+*/}}
+{{- define "podSecurityPolicy.apiGroup" -}}
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "policy" -}}
+{{- else -}}
+{{- print "extensions" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret object should be created
+*/}}
+{{- define "redis.createTlsSecret" -}}
+{{- if and .Values.tls.enabled .Values.tls.autoGenerated (and (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret)) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing Redis TLS certificates
+*/}}
+{{- define "redis.tlsSecretName" -}}
+{{- $secretName := coalesce .Values.tls.existingSecret .Values.tls.certificatesSecret -}}
+{{- if $secretName -}}
+ {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+ {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "redis.tlsCert" -}}
+{{- if (include "redis.createTlsSecret" . ) -}}
+ {{- printf "/opt/bitnami/redis/certs/%s" "tls.crt" -}}
+{{- else -}}
+ {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "redis.tlsCertKey" -}}
+{{- if (include "redis.createTlsSecret" . ) -}}
+ {{- printf "/opt/bitnami/redis/certs/%s" "tls.key" -}}
+{{- else -}}
+ {{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "redis.tlsCACert" -}}
+{{- if (include "redis.createTlsSecret" . ) -}}
+ {{- printf "/opt/bitnami/redis/certs/%s" "ca.crt" -}}
+{{- else -}}
+ {{- required "Certificate CA filename is required when TLS is enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the DH params file.
+*/}}
+{{- define "redis.tlsDHParams" -}}
+{{- if .Values.tls.dhParamsFilename -}}
+{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the shared service account to use
+*/}}
+{{- define "redis.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the master service account to use
+*/}}
+{{- define "redis.masterServiceAccountName" -}}
+{{- if .Values.master.serviceAccount.create -}}
+ {{ default (printf "%s-master" (include "common.names.fullname" .)) .Values.master.serviceAccount.name }}
+{{- else -}}
+ {{- if .Values.serviceAccount.create -}}
+ {{ template "redis.serviceAccountName" . }}
+ {{- else -}}
+ {{ default "default" .Values.master.serviceAccount.name }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the replicas service account to use
+*/}}
+{{- define "redis.replicaServiceAccountName" -}}
+{{- if .Values.replica.serviceAccount.create -}}
+ {{ default (printf "%s-replica" (include "common.names.fullname" .)) .Values.replica.serviceAccount.name }}
+{{- else -}}
+ {{- if .Values.serviceAccount.create -}}
+ {{ template "redis.serviceAccountName" . }}
+ {{- else -}}
+ {{ default "default" .Values.replica.serviceAccount.name }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the configuration configmap name
+*/}}
+{{- define "redis.configmapName" -}}
+{{- if .Values.existingConfigmap -}}
+ {{- printf "%s" (tpl .Values.existingConfigmap $) -}}
+{{- else -}}
+ {{- printf "%s-configuration" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created
+*/}}
+{{- define "redis.createConfigmap" -}}
+{{- if empty .Values.existingConfigmap }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "redis.secretName" -}}
+{{- if .Values.auth.existingSecret -}}
+{{- printf "%s" (tpl .Values.auth.existingSecret $) -}}
+{{- else -}}
+{{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password key to be retrieved from Redis® secret.
+*/}}
+{{- define "redis.secretPasswordKey" -}}
+{{- if and .Values.auth.existingSecret .Values.auth.existingSecretPasswordKey -}}
+{{- printf "%s" .Values.auth.existingSecretPasswordKey -}}
+{{- else -}}
+{{- printf "redis-password" -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Returns the available value for a certain key in an existing secret (if it exists),
+otherwise it generates a random value.
+*/}}
+{{- define "getValueFromSecret" }}
+ {{- $len := (default 16 .Length) | int -}}
+ {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}}
+ {{- if $obj }}
+ {{- index $obj .Key | b64dec -}}
+ {{- else -}}
+ {{- randAlphaNum $len -}}
+ {{- end -}}
+{{- end }}
+
+{{/*
+Return Redis® password
+*/}}
+{{- define "redis.password" -}}
+{{- if not (empty .Values.global.redis.password) }}
+ {{- .Values.global.redis.password -}}
+{{- else if not (empty .Values.auth.password) -}}
+ {{- .Values.auth.password -}}
+{{- else -}}
+ {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "redis-password") -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Check if there are rolling tags in the images */}}
+{{- define "redis.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.sentinel.image }}
+{{- include "common.warnings.rollingTag" .Values.metrics.image }}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "redis.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "redis.validateValues.topologySpreadConstraints" .) -}}
+{{- $messages := append $messages (include "redis.validateValues.architecture" .) -}}
+{{- $messages := append $messages (include "redis.validateValues.podSecurityPolicy.create" .) -}}
+{{- $messages := append $messages (include "redis.validateValues.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Redis® - spreadConstrainsts K8s version */}}
+{{- define "redis.validateValues.topologySpreadConstraints" -}}
+{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.replica.topologySpreadConstraints -}}
+redis: topologySpreadConstraints
+ Pod Topology Spread Constraints are only available on K8s >= 1.16
+ Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Redis® - must provide a valid architecture */}}
+{{- define "redis.validateValues.architecture" -}}
+{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replication") -}}
+redis: architecture
+ Invalid architecture selected. Valid values are "standalone" and
+ "replication". Please set a valid architecture (--set architecture="xxxx")
+{{- end -}}
+{{- if and .Values.sentinel.enabled (not (eq .Values.architecture "replication")) }}
+redis: architecture
+ Using redis sentinel on standalone mode is not supported.
+ To deploy redis sentinel, please select the "replication" mode
+ (--set "architecture=replication,sentinel.enabled=true")
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Redis® - PodSecurityPolicy create */}}
+{{- define "redis.validateValues.podSecurityPolicy.create" -}}
+{{- if and .Values.podSecurityPolicy.create (not .Values.podSecurityPolicy.enabled) }}
+redis: podSecurityPolicy.create
+ In order to create PodSecurityPolicy, you also need to enable
+ podSecurityPolicy.enabled field
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Redis® - TLS enabled */}}
+{{- define "redis.validateValues.tls" -}}
+{{- if and .Values.tls.enabled (not .Values.tls.autoGenerated) (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret) }}
+redis: tls.enabled
+ In order to enable TLS, you also need to provide
+ an existing secret containing the TLS certificates or
+ enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
+
+{{/* Define the suffix utilized for external-dns */}}
+{{- define "redis.externalDNS.suffix" -}}
+{{ printf "%s.%s" (include "common.names.fullname" .) .Values.useExternalDNS.suffix }}
+{{- end -}}
+
+{{/* Compile all annotations utilized for external-dns */}}
+{{- define "redis.externalDNS.annotations" -}}
+{{- if and .Values.useExternalDNS.enabled .Values.useExternalDNS.annotationKey }}
+{{ .Values.useExternalDNS.annotationKey }}hostname: {{ include "redis.externalDNS.suffix" . }}
+{{- range $key, $val := .Values.useExternalDNS.additionalAnnotations }}
+{{ $.Values.useExternalDNS.annotationKey }}{{ $key }}: {{ $val | quote }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/configmap.yaml b/charts/penpot/charts/redis/templates/configmap.yaml
new file mode 100644
index 0000000..9e70a38
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/configmap.yaml
@@ -0,0 +1,59 @@
+{{- if (include "redis.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-configuration" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ {{- if .Values.commonConfiguration }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonConfiguration "context" $ ) | nindent 4 }}
+ {{- end }}
+ # End of common configuration
+ master.conf: |-
+ dir {{ .Values.master.persistence.path }}
+ # User-supplied master configuration:
+ {{- if .Values.master.configuration }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.master.configuration "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.master.disableCommands }}
+ {{- range .Values.master.disableCommands }}
+ rename-command {{ . }} ""
+ {{- end }}
+ {{- end }}
+ # End of master configuration
+ replica.conf: |-
+ dir {{ .Values.replica.persistence.path }}
+ # User-supplied replica configuration:
+ {{- if .Values.replica.configuration }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.configuration "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.replica.disableCommands }}
+ {{- range .Values.replica.disableCommands }}
+ rename-command {{ . }} ""
+ {{- end }}
+ {{- end }}
+ # End of replica configuration
+ {{- if .Values.sentinel.enabled }}
+ sentinel.conf: |-
+ dir "/tmp"
+ port {{ .Values.sentinel.containerPorts.sentinel }}
+ sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "common.names.fullname" . }}-node-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.sentinel.service.ports.redis }} {{ .Values.sentinel.quorum }}
+ sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }}
+ sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }}
+ sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }}
+ # User-supplied sentinel configuration:
+ {{- if .Values.sentinel.configuration }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.configuration "context" $ ) | nindent 4 }}
+ {{- end }}
+ # End of sentinel configuration
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/extra-list.yaml b/charts/penpot/charts/redis/templates/extra-list.yaml
new file mode 100644
index 0000000..9ac65f9
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/headless-svc.yaml b/charts/penpot/charts/redis/templates/headless-svc.yaml
new file mode 100644
index 0000000..d798a0b
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/headless-svc.yaml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-headless" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- include "redis.externalDNS.annotations" . | nindent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ {{- if .Values.sentinel.enabled }}
+ publishNotReadyAddresses: true
+ {{- end }}
+ ports:
+ - name: tcp-redis
+ port: {{ if .Values.sentinel.enabled }}{{ .Values.sentinel.service.ports.redis }}{{ else }}{{ .Values.master.service.ports.redis }}{{ end }}
+ targetPort: redis
+ {{- if .Values.sentinel.enabled }}
+ - name: tcp-sentinel
+ port: {{ .Values.sentinel.service.ports.sentinel }}
+ targetPort: redis-sentinel
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
diff --git a/charts/penpot/charts/redis/templates/health-configmap.yaml b/charts/penpot/charts/redis/templates/health-configmap.yaml
new file mode 100644
index 0000000..41f3145
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/health-configmap.yaml
@@ -0,0 +1,192 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-health" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+{{- if .Values.tls.enabled }}
+ -p $REDIS_TLS_PORT \
+ --tls \
+ --cacert {{ template "redis.tlsCACert" . }} \
+ {{- if .Values.tls.authClients }}
+ --cert {{ template "redis.tlsCert" . }} \
+ --key {{ template "redis.tlsCertKey" . }} \
+ {{- end }}
+{{- else }}
+ -p $REDIS_PORT \
+{{- end }}
+ ping
+ )
+ if [ "$?" -eq "124" ]; then
+ echo "Timed out"
+ exit 1
+ fi
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+{{- if .Values.tls.enabled }}
+ -p $REDIS_TLS_PORT \
+ --tls \
+ --cacert {{ template "redis.tlsCACert" . }} \
+ {{- if .Values.tls.authClients }}
+ --cert {{ template "redis.tlsCert" . }} \
+ --key {{ template "redis.tlsCertKey" . }} \
+ {{- end }}
+{{- else }}
+ -p $REDIS_PORT \
+{{- end }}
+ ping
+ )
+ if [ "$?" -eq "124" ]; then
+ echo "Timed out"
+ exit 1
+ fi
+ responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
+ if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then
+ echo "$response"
+ exit 1
+ fi
+{{- if .Values.sentinel.enabled }}
+ ping_sentinel.sh: |-
+ #!/bin/bash
+
+{{- if .Values.auth.sentinel }}
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+{{- end }}
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+{{- if .Values.tls.enabled }}
+ -p $REDIS_SENTINEL_TLS_PORT_NUMBER \
+ --tls \
+ --cacert "$REDIS_SENTINEL_TLS_CA_FILE" \
+ {{- if .Values.tls.authClients }}
+ --cert "$REDIS_SENTINEL_TLS_CERT_FILE" \
+ --key "$REDIS_SENTINEL_TLS_KEY_FILE" \
+ {{- end }}
+{{- else }}
+ -p $REDIS_SENTINEL_PORT \
+{{- end }}
+ ping
+ )
+ if [ "$?" -eq "124" ]; then
+ echo "Timed out"
+ exit 1
+ fi
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ parse_sentinels.awk: |-
+ /ip/ {FOUND_IP=1}
+ /port/ {FOUND_PORT=1}
+ /runid/ {FOUND_RUNID=1}
+ !/ip|port|runid/ {
+ if (FOUND_IP==1) {
+ IP=$1; FOUND_IP=0;
+ }
+ else if (FOUND_PORT==1) {
+ PORT=$1;
+ FOUND_PORT=0;
+ } else if (FOUND_RUNID==1) {
+ printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0;
+ }
+ }
+{{- end }}
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+{{- if .Values.tls.enabled }}
+ --tls \
+ --cacert {{ template "redis.tlsCACert" . }} \
+ {{- if .Values.tls.authClients }}
+ --cert {{ template "redis.tlsCert" . }} \
+ --key {{ template "redis.tlsCertKey" . }} \
+ {{- end }}
+{{- end }}
+ ping
+ )
+ if [ "$?" -eq "124" ]; then
+ echo "Timed out"
+ exit 1
+ fi
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+{{- if .Values.tls.enabled }}
+ --tls \
+ --cacert {{ template "redis.tlsCACert" . }} \
+ {{- if .Values.tls.authClients }}
+ --cert {{ template "redis.tlsCert" . }} \
+ --key {{ template "redis.tlsCertKey" . }} \
+ {{- end }}
+{{- end }}
+ ping
+ )
+ if [ "$?" -eq "124" ]; then
+ echo "Timed out"
+ exit 1
+ fi
+ responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
+ if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
diff --git a/charts/penpot/charts/redis/templates/master/application.yaml b/charts/penpot/charts/redis/templates/master/application.yaml
new file mode 100644
index 0000000..acff2e2
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/master/application.yaml
@@ -0,0 +1,516 @@
+{{- if or (not (eq .Values.architecture "replication")) (not .Values.sentinel.enabled) }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: {{ .Values.master.kind }}
+metadata:
+ name: {{ printf "%s-master" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: master
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.master.count }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: master
+ {{- if (eq .Values.master.kind "StatefulSet") }}
+ serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }}
+ {{- end }}
+ {{- if .Values.master.updateStrategy }}
+ {{- if (eq .Values.master.kind "Deployment") }}
+ strategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }}
+ {{- else }}
+ updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }}
+ {{- end }}
+ {{- if and .Values.master.minReadySeconds (semverCompare ">= 1.25" (include "common.capabilities.kubeVersion" .)) }}
+ minReadySeconds: {{ .Values.master.minReadySeconds }}
+ {{- end }}
+ {{- end }}
+ template:
+ metadata:
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/component: master
+ {{- if .Values.master.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.master.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- if (include "redis.createConfigmap" .) }}
+ checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ {{- end }}
+ checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }}
+ checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }}
+ checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+ {{- if .Values.master.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.master.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- include "redis.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.master.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.master.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "redis.masterServiceAccountName" . }}
+ {{- if .Values.master.priorityClassName }}
+ priorityClassName: {{ .Values.master.priorityClassName | quote }}
+ {{- end }}
+ {{- if .Values.master.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.master.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.master.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.master.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.master.shareProcessNamespace }}
+ shareProcessNamespace: {{ .Values.master.shareProcessNamespace }}
+ {{- end }}
+ {{- if .Values.master.schedulerName }}
+ schedulerName: {{ .Values.master.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.master.dnsPolicy }}
+ dnsPolicy: {{ .Values.master.dnsPolicy }}
+ {{- end }}
+ {{- if .Values.master.dnsConfig }}
+ dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.master.dnsConfig "context" $) | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }}
+ containers:
+ - name: redis
+ image: {{ template "redis.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.master.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.master.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.master.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }}
+ {{- else }}
+ command:
+ - /bin/bash
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.master.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }}
+ {{- else }}
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: {{ ternary "no" "yes" .Values.auth.enabled | quote }}
+ {{- if .Values.auth.enabled }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: REDIS_PASSWORD_FILE
+ value: "/opt/bitnami/redis/secrets/redis-password"
+ {{- else }}
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ - name: REDIS_TLS_ENABLED
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_TLS_PORT
+ value: {{ .Values.master.containerPorts.redis | quote }}
+ - name: REDIS_TLS_AUTH_CLIENTS
+ value: {{ ternary "yes" "no" .Values.tls.authClients | quote }}
+ - name: REDIS_TLS_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ - name: REDIS_TLS_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_TLS_CA_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- if .Values.tls.dhParamsFilename }}
+ - name: REDIS_TLS_DH_PARAMS_FILE
+ value: {{ template "redis.tlsDHParams" . }}
+ {{- end }}
+ {{- else }}
+ - name: REDIS_PORT
+ value: {{ .Values.master.containerPorts.redis | quote }}
+ {{- end }}
+ {{- if .Values.master.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.master.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.master.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.master.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.master.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: redis
+ containerPort: {{ .Values.master.containerPorts.redis }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.master.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.master.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: redis
+ {{- end }}
+ {{- if .Values.master.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.master.livenessProbe.enabled }}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- if .Values.master.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.master.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.master.resources }}
+ resources: {{- toYaml .Values.master.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ mountPath: /opt/bitnami/redis/secrets/
+ {{- end }}
+ - name: redis-data
+ mountPath: {{ .Values.master.persistence.path }}
+ {{- if .Values.master.persistence.subPath }}
+ subPath: {{ .Values.master.persistence.subPath }}
+ {{- else if .Values.master.persistence.subPathExpr }}
+ subPathExpr: {{ .Values.master.persistence.subPathExpr }}
+ {{- end }}
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.master.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ include "redis.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else }}
+ command:
+ - /bin/bash
+ - -c
+ - |
+ if [[ -f '/secrets/redis-password' ]]; then
+ export REDIS_PASSWORD=$(cat /secrets/redis-password)
+ fi
+ redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: REDIS_ALIAS
+ value: {{ template "common.names.fullname" . }}
+ {{- if .Values.auth.enabled }}
+ - name: REDIS_USER
+ value: default
+ {{- if (not .Values.auth.usePasswordFiles) }}
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_ADDR
+ value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.master.containerPorts.redis }}
+ {{- if .Values.tls.authClients }}
+ - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ {{- end }}
+ - name: REDIS_EXPORTER_TLS_CA_CERT_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: 9121
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: metrics
+ {{- end }}
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ mountPath: /secrets/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.master.sidecars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }}
+ {{- end }}
+ {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.master.podSecurityContext.enabled .Values.master.containerSecurityContext.enabled }}
+ {{- if or .Values.master.initContainers $needsVolumePermissions .Values.sysctl.enabled }}
+ initContainers:
+ {{- if .Values.master.initContainers }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if $needsVolumePermissions }}
+ - name: volume-permissions
+ image: {{ include "redis.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }}
+ {{- else }}
+ chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} {{ .Values.master.persistence.path }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: redis-data
+ mountPath: {{ .Values.master.persistence.path }}
+ {{- if .Values.master.persistence.subPath }}
+ subPath: {{ .Values.master.persistence.subPath }}
+ {{- else if .Values.master.persistence.subPathExpr }}
+ subPathExpr: {{ .Values.master.persistence.subPathExpr }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sysctl.enabled }}
+ - name: init-sysctl
+ image: {{ include "redis.sysctl.image" . }}
+ imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }}
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ {{- if .Values.sysctl.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.sysctl.resources }}
+ resources: {{- toYaml .Values.sysctl.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.sysctl.mountHostSys }}
+ volumeMounts:
+ - name: host-sys
+ mountPath: /host-sys
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: {{ printf "%s-health" (include "common.names.fullname" .) }}
+ defaultMode: 0755
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ secret:
+ secretName: {{ template "redis.secretName" . }}
+ items:
+ - key: {{ template "redis.secretPasswordKey" . }}
+ path: redis-password
+ {{- end }}
+ - name: config
+ configMap:
+ name: {{ include "redis.configmapName" . }}
+ {{- if .Values.sysctl.mountHostSys }}
+ - name: host-sys
+ hostPath:
+ path: /sys
+ {{- end }}
+ - name: redis-tmp-conf
+ {{- if or .Values.master.persistence.medium .Values.master.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.master.persistence.medium }}
+ medium: {{ .Values.master.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.master.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - name: tmp
+ {{- if or .Values.master.persistence.medium .Values.master.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.master.persistence.medium }}
+ medium: {{ .Values.master.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.master.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ secret:
+ secretName: {{ include "redis.tlsSecretName" . }}
+ defaultMode: 256
+ {{- end }}
+ {{- if .Values.master.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.metrics.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if not .Values.master.persistence.enabled }}
+ - name: redis-data
+ {{- if or .Values.master.persistence.medium .Values.master.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.master.persistence.medium }}
+ medium: {{ .Values.master.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.master.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- else if .Values.master.persistence.existingClaim }}
+ - name: redis-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s" (tpl .Values.master.persistence.existingClaim .) }}
+ {{- else if (eq .Values.master.kind "Deployment") }}
+ - name: redis-data
+ persistentVolumeClaim:
+ claimName: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: redis-data
+ labels: {{- include "common.labels.matchLabels" . | nindent 10 }}
+ app.kubernetes.io/component: master
+ {{- if .Values.master.persistence.annotations }}
+ annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.master.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.master.persistence.size | quote }}
+ {{- if .Values.master.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.master.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/master/psp.yaml b/charts/penpot/charts/redis/templates/master/psp.yaml
new file mode 100644
index 0000000..2ba93b6
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/master/psp.yaml
@@ -0,0 +1,46 @@
+{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- if and $pspAvailable .Values.podSecurityPolicy.create }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ printf "%s-master" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ allowPrivilegeEscalation: false
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: {{ .Values.master.podSecurityContext.fsGroup }}
+ max: {{ .Values.master.podSecurityContext.fsGroup }}
+ hostIPC: false
+ hostNetwork: false
+ hostPID: false
+ privileged: false
+ readOnlyRootFilesystem: false
+ requiredDropCapabilities:
+ - ALL
+ runAsUser:
+ rule: 'MustRunAs'
+ ranges:
+ - min: {{ .Values.master.containerSecurityContext.runAsUser }}
+ max: {{ .Values.master.containerSecurityContext.runAsUser }}
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: {{ .Values.master.containerSecurityContext.runAsUser }}
+ max: {{ .Values.master.containerSecurityContext.runAsUser }}
+ volumes:
+ - 'configMap'
+ - 'secret'
+ - 'emptyDir'
+ - 'persistentVolumeClaim'
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/master/pvc.yaml b/charts/penpot/charts/redis/templates/master/pvc.yaml
new file mode 100644
index 0000000..e5fddb0
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/master/pvc.yaml
@@ -0,0 +1,27 @@
+{{- if and (eq .Values.architecture "standalone") (eq .Values.master.kind "Deployment") (.Values.master.persistence.enabled) (not .Values.master.persistence.existingClaim) }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: master
+ {{- if .Values.master.persistence.annotations }}
+ annotations: {{- toYaml .Values.master.persistence.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ {{- range .Values.master.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.master.persistence.size | quote }}
+ {{- if .Values.master.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.master.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 4 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 2 }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/master/service.yaml b/charts/penpot/charts/redis/templates/master/service.yaml
new file mode 100644
index 0000000..c03fea7
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/master/service.yaml
@@ -0,0 +1,61 @@
+{{- if not .Values.sentinel.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-master" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: master
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.master.service.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.master.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.master.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.master.service.type }}
+ {{- if or (eq .Values.master.service.type "LoadBalancer") (eq .Values.master.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if (semverCompare ">=1.22-0" (include "common.capabilities.kubeVersion" .)) }}
+ internalTrafficPolicy: {{ .Values.master.service.internalTrafficPolicy }}
+ {{- end }}
+ {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.master.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{ .Values.master.service.loadBalancerSourceRanges }}
+ {{- end }}
+ {{- if and .Values.master.service.clusterIP (eq .Values.master.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.master.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.master.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.master.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.master.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.master.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.master.service.externalIPs }}
+ externalIPs: {{- include "common.tplvalues.render" (dict "value" .Values.master.service.externalIPs "context" $) | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: tcp-redis
+ port: {{ .Values.master.service.ports.redis }}
+ targetPort: redis
+ {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) .Values.master.service.nodePorts.redis}}
+ nodePort: {{ .Values.master.service.nodePorts.redis}}
+ {{- else if eq .Values.master.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.master.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.master.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: master
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/master/serviceaccount.yaml b/charts/penpot/charts/redis/templates/master/serviceaccount.yaml
new file mode 100644
index 0000000..9c62e5f
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/master/serviceaccount.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.master.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: {{ .Values.master.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "redis.masterServiceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.master.serviceAccount.annotations }}
+ annotations:
+ {{- if or .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.master.serviceAccount.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.master.serviceAccount.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/metrics-svc.yaml b/charts/penpot/charts/redis/templates/metrics-svc.yaml
new file mode 100644
index 0000000..13c552f
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/metrics-svc.yaml
@@ -0,0 +1,41 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-metrics" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.metrics.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.metrics.service.type }}
+ {{- if eq .Values.metrics.service.type "LoadBalancer" }}
+ externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }}
+ {{- end }}
+ {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{- toYaml .Values.metrics.service.loadBalancerSourceRanges | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ port: {{ .Values.metrics.service.port }}
+ protocol: TCP
+ targetPort: metrics
+ {{- if .Values.metrics.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/networkpolicy.yaml b/charts/penpot/charts/redis/templates/networkpolicy.yaml
new file mode 100644
index 0000000..7205cea
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/networkpolicy.yaml
@@ -0,0 +1,82 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ template "networkPolicy.apiVersion" . }}
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Ingress
+ {{- if or (eq .Values.architecture "replication") .Values.networkPolicy.extraEgress }}
+ - Egress
+ egress:
+ {{- if eq .Values.architecture "replication" }}
+ # Allow dns resolution
+ - ports:
+ - port: 53
+ protocol: UDP
+ # Allow outbound connections to other cluster pods
+ - ports:
+ - port: {{ .Values.master.containerPorts.redis }}
+ {{- if .Values.sentinel.enabled }}
+ - port: {{ .Values.sentinel.containerPorts.sentinel }}
+ {{- end }}
+ to:
+ - podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.extraEgress }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+ ingress:
+ # Allow inbound connections
+ - ports:
+ - port: {{ .Values.master.containerPorts.redis }}
+ {{- if .Values.sentinel.enabled }}
+ - port: {{ .Values.sentinel.containerPorts.sentinel }}
+ {{- end }}
+ {{- if not .Values.networkPolicy.allowExternal }}
+ from:
+ - podSelector:
+ matchLabels:
+ {{ template "common.names.fullname" . }}-client: "true"
+ - podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+ {{- if or .Values.networkPolicy.ingressNSMatchLabels .Values.networkPolicy.ingressNSPodMatchLabels }}
+ - namespaceSelector:
+ matchLabels:
+ {{- if .Values.networkPolicy.ingressNSMatchLabels }}
+ {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{ else }}
+ {}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressNSPodMatchLabels }}
+ podSelector:
+ matchLabels:
+ {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ # Allow prometheus scrapes for metrics
+ - ports:
+ - port: 9121
+ {{- end }}
+ {{- if .Values.networkPolicy.extraIngress }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/pdb.yaml b/charts/penpot/charts/redis/templates/pdb.yaml
new file mode 100644
index 0000000..f82d278
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/pdb.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.pdb.create }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.pdb.minAvailable }}
+ minAvailable: {{ .Values.pdb.minAvailable }}
+ {{- end }}
+ {{- if .Values.pdb.maxUnavailable }}
+ maxUnavailable: {{ .Values.pdb.maxUnavailable }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/prometheusrule.yaml b/charts/penpot/charts/redis/templates/prometheusrule.yaml
new file mode 100644
index 0000000..b89d116
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/prometheusrule.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.metrics.prometheusRule.additionalLabels }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ groups:
+ - name: {{ include "common.names.fullname" . }}
+ rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/replicas/hpa.yaml b/charts/penpot/charts/redis/templates/replicas/hpa.yaml
new file mode 100644
index 0000000..ea069a8
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/replicas/hpa.yaml
@@ -0,0 +1,47 @@
+{{- if and .Values.replica.autoscaling.enabled (not .Values.sentinel.enabled) }}
+apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ printf "%s-replicas" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: replica
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ scaleTargetRef:
+ apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+ kind: StatefulSet
+ name: {{ printf "%s-replicas" (include "common.names.fullname" .) }}
+ minReplicas: {{ .Values.replica.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.replica.autoscaling.targetMemory }}
+ - type: Resource
+ resource:
+ name: memory
+ {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
+ targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }}
+ {{- else }}
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.replica.autoscaling.targetMemory }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.autoscaling.targetCPU }}
+ - type: Resource
+ resource:
+ name: cpu
+ {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
+ targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }}
+ {{- else }}
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.replica.autoscaling.targetCPU }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/replicas/service.yaml b/charts/penpot/charts/redis/templates/replicas/service.yaml
new file mode 100644
index 0000000..f261926
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/replicas/service.yaml
@@ -0,0 +1,58 @@
+{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-replicas" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: replica
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.replica.service.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.replica.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.replica.service.type }}
+ {{- if or (eq .Values.replica.service.type "LoadBalancer") (eq .Values.replica.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.replica.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if (semverCompare ">=1.22-0" (include "common.capabilities.kubeVersion" .)) }}
+ internalTrafficPolicy: {{ .Values.replica.service.internalTrafficPolicy }}
+ {{- end }}
+ {{- if and (eq .Values.replica.service.type "LoadBalancer") (not (empty .Values.replica.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.replica.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.replica.service.type "LoadBalancer") (not (empty .Values.replica.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{ .Values.replica.service.loadBalancerSourceRanges }}
+ {{- end }}
+ {{- if and .Values.replica.service.clusterIP (eq .Values.replica.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.replica.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.replica.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.replica.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.replica.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: tcp-redis
+ port: {{ .Values.replica.service.ports.redis }}
+ targetPort: redis
+ {{- if and (or (eq .Values.replica.service.type "NodePort") (eq .Values.replica.service.type "LoadBalancer")) .Values.replica.service.nodePorts.redis}}
+ nodePort: {{ .Values.replica.service.nodePorts.redis}}
+ {{- else if eq .Values.replica.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.replica.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: replica
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/replicas/serviceaccount.yaml b/charts/penpot/charts/redis/templates/replicas/serviceaccount.yaml
new file mode 100644
index 0000000..333ec9f
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/replicas/serviceaccount.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.replica.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: {{ .Values.replica.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "redis.replicaServiceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.replica.serviceAccount.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.replica.serviceAccount.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.serviceAccount.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/replicas/statefulset.yaml b/charts/penpot/charts/redis/templates/replicas/statefulset.yaml
new file mode 100644
index 0000000..8a8541d
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/replicas/statefulset.yaml
@@ -0,0 +1,515 @@
+{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ printf "%s-replicas" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: replica
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if not .Values.replica.autoscaling.enabled }}
+ replicas: {{ .Values.replica.replicaCount }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: replica
+ serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }}
+ {{- if .Values.replica.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }}
+ {{- end }}
+ {{- if and .Values.replica.minReadySeconds (semverCompare ">= 1.25" (include "common.capabilities.kubeVersion" .)) }}
+ minReadySeconds: {{ .Values.replica.minReadySeconds }}
+ {{- end }}
+ {{- if .Values.replica.podManagementPolicy }}
+ podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }}
+ {{- end }}
+ template:
+ metadata:
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/component: replica
+ {{- if .Values.replica.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- if (include "redis.createConfigmap" .) }}
+ checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ {{- end }}
+ checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }}
+ checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }}
+ checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+ {{- if .Values.replica.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- include "redis.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.replica.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "redis.replicaServiceAccountName" . }}
+ {{- if .Values.replica.priorityClassName }}
+ priorityClassName: {{ .Values.replica.priorityClassName | quote }}
+ {{- end }}
+ {{- if .Values.replica.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "replica" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "replica" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.replica.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.shareProcessNamespace }}
+ shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }}
+ {{- end }}
+ {{- if .Values.replica.schedulerName }}
+ schedulerName: {{ .Values.replica.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.replica.dnsPolicy }}
+ dnsPolicy: {{ .Values.replica.dnsPolicy }}
+ {{- end }}
+ {{- if .Values.replica.dnsConfig }}
+ dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ .Values.replica.terminationGracePeriodSeconds }}
+ containers:
+ - name: redis
+ image: {{ template "redis.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.replica.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.replica.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }}
+ {{- else }}
+ command:
+ - /bin/bash
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.replica.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }}
+ {{- else }}
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: REDIS_REPLICATION_MODE
+ value: replica
+ - name: REDIS_MASTER_HOST
+ {{- if and (eq (int64 .Values.master.count) 1) (ne .Values.master.kind "Deployment") }}
+ value: {{ template "common.names.fullname" . }}-master-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+ {{- else }}
+ value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+ {{- end }}
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: {{ .Values.master.containerPorts.redis | quote }}
+ - name: ALLOW_EMPTY_PASSWORD
+ value: {{ ternary "no" "yes" .Values.auth.enabled | quote }}
+ {{- if .Values.auth.enabled }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: REDIS_PASSWORD_FILE
+ value: "/opt/bitnami/redis/secrets/redis-password"
+ - name: REDIS_MASTER_PASSWORD_FILE
+ value: "/opt/bitnami/redis/secrets/redis-password"
+ {{- else }}
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ - name: REDIS_TLS_ENABLED
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_TLS_PORT
+ value: {{ .Values.replica.containerPorts.redis | quote }}
+ - name: REDIS_TLS_AUTH_CLIENTS
+ value: {{ ternary "yes" "no" .Values.tls.authClients | quote }}
+ - name: REDIS_TLS_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ - name: REDIS_TLS_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_TLS_CA_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- if .Values.tls.dhParamsFilename }}
+ - name: REDIS_TLS_DH_PARAMS_FILE
+ value: {{ template "redis.tlsDHParams" . }}
+ {{- end }}
+ {{- else }}
+ - name: REDIS_PORT
+ value: {{ .Values.replica.containerPorts.redis | quote }}
+ {{- end }}
+ {{- if .Values.replica.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.replica.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.replica.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.replica.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.replica.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.replica.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: redis
+ containerPort: {{ .Values.replica.containerPorts.redis }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.replica.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.replica.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: redis
+ {{- end }}
+ {{- if .Values.replica.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.replica.livenessProbe.enabled }}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ add1 .Values.replica.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.replica.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh {{ .Values.replica.livenessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- if .Values.replica.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.replica.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ add1 .Values.replica.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.replica.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh {{ .Values.replica.readinessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.resources }}
+ resources: {{- toYaml .Values.replica.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ mountPath: /opt/bitnami/redis/secrets/
+ {{- end }}
+ - name: redis-data
+ mountPath: /data
+ {{- if .Values.replica.persistence.subPath }}
+ subPath: {{ .Values.replica.persistence.subPath }}
+ {{- else if .Values.replica.persistence.subPathExpr }}
+ subPathExpr: {{ .Values.replica.persistence.subPathExpr }}
+ {{- end }}
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.replica.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ include "redis.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }}
+ {{- else }}
+ command:
+ - /bin/bash
+ - -c
+ - |
+ if [[ -f '/secrets/redis-password' ]]; then
+ export REDIS_PASSWORD=$(cat /secrets/redis-password)
+ fi
+ redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: REDIS_ALIAS
+ value: {{ template "common.names.fullname" . }}
+ {{- if .Values.auth.enabled }}
+ - name: REDIS_USER
+ value: default
+ {{- if (not .Values.auth.usePasswordFiles) }}
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_ADDR
+ value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }}
+ {{- if .Values.tls.authClients }}
+ - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ {{- end }}
+ - name: REDIS_EXPORTER_TLS_CA_CERT_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: 9121
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: metrics
+ {{- end }}
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ mountPath: /secrets/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.sidecars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }}
+ {{- end }}
+ {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }}
+ {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }}
+ initContainers:
+ {{- if .Values.replica.initContainers }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if $needsVolumePermissions }}
+ - name: volume-permissions
+ image: {{ include "redis.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }}
+ {{- else }}
+ chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: redis-data
+ mountPath: {{ .Values.replica.persistence.path }}
+ {{- if .Values.replica.persistence.subPath }}
+ subPath: {{ .Values.replica.persistence.subPath }}
+ {{- else if .Values.replica.persistence.subPathExpr }}
+ subPathExpr: {{ .Values.replica.persistence.subPathExpr }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sysctl.enabled }}
+ - name: init-sysctl
+ image: {{ include "redis.sysctl.image" . }}
+ imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }}
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ {{- if .Values.sysctl.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.sysctl.resources }}
+ resources: {{- toYaml .Values.sysctl.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.sysctl.mountHostSys }}
+ volumeMounts:
+ - name: host-sys
+ mountPath: /host-sys
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: {{ printf "%s-health" (include "common.names.fullname" .) }}
+ defaultMode: 0755
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ secret:
+ secretName: {{ template "redis.secretName" . }}
+ items:
+ - key: {{ template "redis.secretPasswordKey" . }}
+ path: redis-password
+ {{- end }}
+ - name: config
+ configMap:
+ name: {{ include "redis.configmapName" . }}
+ {{- if .Values.sysctl.mountHostSys }}
+ - name: host-sys
+ hostPath:
+ path: /sys
+ {{- end }}
+ - name: redis-tmp-conf
+ {{- if or .Values.replica.persistence.medium .Values.replica.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.replica.persistence.medium }}
+ medium: {{ .Values.replica.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.replica.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.replica.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ secret:
+ secretName: {{ include "redis.tlsSecretName" . }}
+ defaultMode: 256
+ {{- end }}
+ {{- if .Values.replica.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.metrics.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if not .Values.replica.persistence.enabled }}
+ - name: redis-data
+ {{- if or .Values.replica.persistence.medium .Values.replica.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.replica.persistence.medium }}
+ medium: {{ .Values.replica.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.replica.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.replica.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- else if .Values.replica.persistence.existingClaim }}
+ - name: redis-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s" (tpl .Values.replica.persistence.existingClaim .) }}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: redis-data
+ labels: {{- include "common.labels.matchLabels" . | nindent 10 }}
+ app.kubernetes.io/component: replica
+ {{- if .Values.replica.persistence.annotations }}
+ annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.replica.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.replica.persistence.size | quote }}
+ {{- if .Values.replica.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.replica.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/role.yaml b/charts/penpot/charts/redis/templates/role.yaml
new file mode 100644
index 0000000..596466f
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/role.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.rbac.create }}
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+kind: Role
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+rules:
+ {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+ {{- if and $pspAvailable .Values.podSecurityPolicy.enabled }}
+ - apiGroups:
+ - '{{ template "podSecurityPolicy.apiGroup" . }}'
+ resources:
+ - 'podsecuritypolicies'
+ verbs:
+ - 'use'
+ resourceNames: [{{ printf "%s-master" (include "common.names.fullname" .) }}]
+ {{- end }}
+ {{- if .Values.rbac.rules }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/rolebinding.yaml b/charts/penpot/charts/redis/templates/rolebinding.yaml
new file mode 100644
index 0000000..74968b8
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/rolebinding.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.rbac.create }}
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+kind: RoleBinding
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "common.names.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "redis.serviceAccountName" . }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/scripts-configmap.yaml b/charts/penpot/charts/redis/templates/scripts-configmap.yaml
new file mode 100644
index 0000000..39fcc50
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/scripts-configmap.yaml
@@ -0,0 +1,681 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }}
+ start-node.sh: |
+ #!/bin/bash
+
+ . /opt/bitnami/scripts/libos.sh
+ . /opt/bitnami/scripts/liblog.sh
+ . /opt/bitnami/scripts/libvalidations.sh
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo {{ .Values.sentinel.containerPorts.sentinel }}
+ ;;
+ "REDIS")
+ echo {{ .Values.master.containerPorts.redis }}
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+
+ {{- if .Values.useExternalDNS.enabled }}
+ echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}"
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ echo "${hostname}.{{- .Release.Namespace }}"
+ {{- else }}
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ {{- end }}
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+
+ if [ -n "$REDIS_EXTERNAL_MASTER_HOST" ]; then
+ REDIS_SERVICE="$REDIS_EXTERNAL_MASTER_HOST"
+ else
+ REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+ fi
+
+ SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . }}" "TCP_SENTINEL")
+ validate_quorum() {
+ if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+ quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel master {{ .Values.sentinel.masterSet }}"
+ else
+ quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel master {{ .Values.sentinel.masterSet }}"
+ fi
+ info "about to run the command: $quorum_info_command"
+ eval $quorum_info_command | grep -Fq "s_down"
+ }
+
+ trigger_manual_failover() {
+ if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+ failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel failover {{ .Values.sentinel.masterSet }}"
+ else
+ failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel failover {{ .Values.sentinel.masterSet }}"
+ fi
+
+ info "about to run the command: $failover_command"
+ eval $failover_command
+ }
+
+ get_sentinel_master_info() {
+ if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+ sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ else
+ sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ fi
+
+ info "about to run the command: $sentinel_info_command"
+ eval $sentinel_info_command
+ }
+
+ {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }}
+ useradd redis
+ chown -R redis {{ .Values.replica.persistence.path }}
+ {{- end }}
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+
+ # check if there is a master
+ master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")"
+ master_port_in_persisted_conf="$REDIS_MASTER_PORT_NUMBER"
+ master_in_sentinel="$(get_sentinel_master_info)"
+ redisRetVal=$?
+
+ {{- if .Values.sentinel.persistence.enabled }}
+ if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then
+ master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)"
+ master_port_in_persisted_conf="$(awk '/monitor/ {print $5}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)"
+ info "Found previous master ${master_in_persisted_conf}:${master_port_in_persisted_conf} in /opt/bitnami/redis-sentinel/etc/sentinel.conf"
+ debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)"
+ touch /opt/bitnami/redis-sentinel/etc/.node_read
+ fi
+ {{- end }}
+
+ if [[ $redisRetVal -ne 0 ]]; then
+ if [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then
+ # Case 1: No active sentinel and in previous sentinel.conf we were the master --> MASTER
+ info "Configuring the node as master"
+ export REDIS_REPLICATION_MODE="master"
+ else
+ # Case 2: No active sentinel and in previous sentinel.conf we were not master --> REPLICA
+ info "Configuring the node as replica"
+ export REDIS_REPLICATION_MODE="replica"
+ REDIS_MASTER_HOST=${master_in_persisted_conf}
+ REDIS_MASTER_PORT_NUMBER=${master_port_in_persisted_conf}
+ fi
+ else
+ # Fetches current master's host and port
+ REDIS_SENTINEL_INFO=($(get_sentinel_master_info))
+ info "Current master: REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})"
+ REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]}
+ REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]}
+
+ if [[ "$REDIS_MASTER_HOST" == "$(get_full_hostname "$HOSTNAME")" ]]; then
+ # Case 3: Active sentinel and master it is this node --> MASTER
+ info "Configuring the node as master"
+ export REDIS_REPLICATION_MODE="master"
+ else
+ # Case 4: Active sentinel and master is not this node --> REPLICA
+ info "Configuring the node as replica"
+ export REDIS_REPLICATION_MODE="replica"
+
+ {{- if and .Values.sentinel.automateClusterRecovery (le (int .Values.sentinel.downAfterMilliseconds) 2000) }}
+ retry_count=1
+ while validate_quorum
+ do
+ info "sleeping, waiting for Redis master to come up"
+ sleep 1s
+ if ! ((retry_count % 11)); then
+ info "Trying to manually failover"
+ failover_result=$(trigger_manual_failover)
+
+ debug "Failover result: $failover_result"
+ fi
+
+ ((retry_count+=1))
+ done
+ info "Redis master is up now"
+ {{- end }}
+ fi
+ fi
+
+ if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then
+ REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST"
+ REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}"
+ fi
+
+ if [[ -f /opt/bitnami/redis/mounted-etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+
+ if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+
+ {{- if .Values.tls.enabled }}
+ ARGS=("--port" "0")
+ ARGS+=("--tls-port" "${REDIS_TLS_PORT}")
+ ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}")
+ ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}")
+ ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}")
+ ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}")
+ ARGS+=("--tls-replication" "yes")
+ {{- if .Values.tls.dhParamsFilename }}
+ ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}")
+ {{- end }}
+ {{- else }}
+ ARGS=("--port" "${REDIS_PORT}")
+ {{- end }}
+
+ if [[ "$REDIS_REPLICATION_MODE" = "slave" ]] || [[ "$REDIS_REPLICATION_MODE" = "replica" ]]; then
+ ARGS+=("--replicaof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ fi
+
+ {{- if .Values.auth.enabled }}
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ {{- else }}
+ ARGS+=("--protected-mode" "no")
+ {{- end }}
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ {{- if .Values.replica.extraFlags }}
+ {{- range .Values.replica.extraFlags }}
+ ARGS+=({{ . | quote }})
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.replica.preExecCmds }}
+ {{- .Values.replica.preExecCmds | nindent 4 }}
+ {{- end }}
+
+ {{- if .Values.replica.command }}
+ exec {{ .Values.replica.command }} "${ARGS[@]}"
+ {{- else }}
+ exec redis-server "${ARGS[@]}"
+ {{- end }}
+
+ start-sentinel.sh: |
+ #!/bin/bash
+
+ . /opt/bitnami/scripts/libos.sh
+ . /opt/bitnami/scripts/libvalidations.sh
+ . /opt/bitnami/scripts/libfile.sh
+
+ HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+ REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo {{ .Values.sentinel.containerPorts.sentinel }}
+ ;;
+ "REDIS")
+ echo {{ .Values.master.containerPorts.redis }}
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+
+ {{- if .Values.useExternalDNS.enabled }}
+ echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}"
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ echo "${hostname}.{{- .Release.Namespace }}"
+ {{- else }}
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ {{- end }}
+ }
+
+ SERVPORT=$(get_port "$HOSTNAME" "SENTINEL")
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+ SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . }}" "TCP_SENTINEL")
+
+ sentinel_conf_set() {
+ local -r key="${1:?missing key}"
+ local value="${2:-}"
+
+ # Sanitize inputs
+ value="${value//\\/\\\\}"
+ value="${value//&/\\&}"
+ value="${value//\?/\\?}"
+ [[ "$value" = "" ]] && value="\"$value\""
+
+ replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false
+ }
+ sentinel_conf_add() {
+ echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf"
+ }
+ host_id() {
+ echo "$1" | openssl sha1 | awk '{print $2}'
+ }
+ get_sentinel_master_info() {
+ if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+ sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ else
+ sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ fi
+ info "about to run the command: $sentinel_info_command"
+ eval $sentinel_info_command
+ }
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+
+ master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")"
+
+ {{- if .Values.sentinel.persistence.enabled }}
+ if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then
+ check_lock_file() {
+ [[ -f /opt/bitnami/redis-sentinel/etc/.node_read ]]
+ }
+ retry_while "check_lock_file"
+ rm -f /opt/bitnami/redis-sentinel/etc/.node_read
+ master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)"
+ info "Found previous master $master_in_persisted_conf in /opt/bitnami/redis-sentinel/etc/sentinel.conf"
+ debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)"
+ fi
+ {{- end }}
+ if ! get_sentinel_master_info && [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then
+ # No master found, lets create a master node
+ export REDIS_REPLICATION_MODE="master"
+
+ REDIS_MASTER_HOST=$(get_full_hostname "$HOSTNAME")
+ REDIS_MASTER_PORT_NUMBER="$REDISPORT"
+ else
+ export REDIS_REPLICATION_MODE="replica"
+
+ # Fetches current master's host and port
+ REDIS_SENTINEL_INFO=($(get_sentinel_master_info))
+ info "printing REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})"
+ REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]}
+ REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]}
+ fi
+
+ if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then
+ REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST"
+ REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}"
+ fi
+
+ cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- if .Values.auth.enabled }}
+ printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- if and .Values.auth.enabled .Values.auth.sentinel }}
+ printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- end }}
+ {{- end }}
+ printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+
+ if [[ -z "$REDIS_MASTER_HOST" ]] || [[ -z "$REDIS_MASTER_PORT_NUMBER" ]]
+ then
+ # Prevent incorrect configuration to be written to sentinel.conf
+ error "Redis master host is configured incorrectly (host: $REDIS_MASTER_HOST, port: $REDIS_MASTER_PORT_NUMBER)"
+ exit 1
+ fi
+
+ sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}"
+
+ add_known_sentinel() {
+ hostname="$1"
+ ip="$2"
+
+ if [[ -n "$hostname" && -n "$ip" && "$hostname" != "$HOSTNAME" ]]; then
+ sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "SENTINEL") $(host_id "$hostname")"
+ fi
+ }
+ add_known_replica() {
+ hostname="$1"
+ ip="$2"
+
+ if [[ -n "$ip" && "$(get_full_hostname "$hostname")" != "$REDIS_MASTER_HOST" ]]; then
+ sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "REDIS")"
+ fi
+ }
+
+ # Add available hosts on the network as known replicas & sentinels
+ for node in $(seq 0 $(({{ .Values.replica.replicaCount }}-1))); do
+ hostname="{{ template "common.names.fullname" . }}-node-$node"
+ ip="$(getent hosts "$hostname.$HEADLESS_SERVICE" | awk '{ print $1 }')"
+ add_known_sentinel "$hostname" "$ip"
+ add_known_replica "$hostname" "$ip"
+ done
+
+ echo "" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- if not (contains "sentinel announce-hostnames" .Values.sentinel.configuration) }}
+ echo "sentinel announce-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- end }}
+ {{- if not (contains "sentinel resolve-hostnames" .Values.sentinel.configuration) }}
+ echo "sentinel resolve-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- end }}
+ {{- if not (contains "sentinel announce-port" .Values.sentinel.configuration) }}
+ echo "sentinel announce-port $SERVPORT" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- end }}
+ {{- if not (contains "sentinel announce-ip" .Values.sentinel.configuration) }}
+ echo "sentinel announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+ {{- end }}
+
+ {{- if .Values.tls.enabled }}
+ ARGS=("--port" "0")
+ ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}")
+ ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}")
+ ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}")
+ ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}")
+ ARGS+=("--tls-replication" "yes")
+ ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}")
+ {{- if .Values.tls.dhParamsFilename }}
+ ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}")
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sentinel.preExecCmds }}
+ {{ .Values.sentinel.preExecCmds | nindent 4 }}
+ {{- end }}
+ exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} --sentinel
+ prestop-sentinel.sh: |
+ #!/bin/bash
+
+ . /opt/bitnami/scripts/libvalidations.sh
+ . /opt/bitnami/scripts/libos.sh
+
+ HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+ SENTINEL_SERVICE_ENV_NAME={{ printf "%s%s" (upper (include "common.names.fullname" .)| replace "-" "_") "_SERVICE_PORT_TCP_SENTINEL" }}
+ SENTINEL_SERVICE_PORT=${!SENTINEL_SERVICE_ENV_NAME}
+
+ get_full_hostname() {
+ hostname="$1"
+
+ {{- if .Values.useExternalDNS.enabled }}
+ echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}"
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ echo "${hostname}.{{- .Release.Namespace }}"
+ {{- else }}
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ {{- end }}
+ }
+ run_sentinel_command() {
+ if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+ redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel "$@"
+ else
+ redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" sentinel "$@"
+ fi
+ }
+ sentinel_failover_finished() {
+ REDIS_SENTINEL_INFO=($(run_sentinel_command get-master-addr-by-name "{{ .Values.sentinel.masterSet }}"))
+ REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}"
+ [[ "$REDIS_MASTER_HOST" != "$(get_full_hostname $HOSTNAME)" ]]
+ }
+
+ REDIS_SERVICE="{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+
+ {{ if .Values.auth.sentinel -}}
+ # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")"
+ {{- end }}
+
+ if ! sentinel_failover_finished; then
+ echo "I am the master pod and you are stopping me. Starting sentinel failover"
+ if retry_while "sentinel_failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1; then
+ echo "Master has been successfuly failed over to a different pod."
+ exit 0
+ else
+ echo "Master failover failed"
+ exit 1
+ fi
+ else
+ exit 0
+ fi
+ prestop-redis.sh: |
+ #!/bin/bash
+
+ . /opt/bitnami/scripts/libvalidations.sh
+ . /opt/bitnami/scripts/libos.sh
+
+ run_redis_command() {
+ if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+ redis-cli -h 127.0.0.1 -p "$REDIS_TLS_PORT" --tls --cert "$REDIS_TLS_CERT_FILE" --key "$REDIS_TLS_KEY_FILE" --cacert "$REDIS_TLS_CA_FILE" "$@"
+ else
+ redis-cli -h 127.0.0.1 -p "$REDIS_PORT" "$@"
+ fi
+ }
+ is_master() {
+ REDIS_ROLE=$(run_redis_command role | head -1)
+ [[ "$REDIS_ROLE" == "master" ]]
+ }
+
+ HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+ SENTINEL_SERVICE_ENV_NAME={{ printf "%s%s" (upper (include "common.names.fullname" .)| replace "-" "_") "_SERVICE_PORT_TCP_SENTINEL" }}
+ SENTINEL_SERVICE_PORT=${!SENTINEL_SERVICE_ENV_NAME}
+
+ get_full_hostname() {
+ hostname="$1"
+
+ {{- if .Values.useExternalDNS.enabled }}
+ echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}"
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ echo "${hostname}.{{- .Release.Namespace }}"
+ {{- else }}
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ {{- end }}
+ }
+ run_sentinel_command() {
+ if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+ {{ .Values.auth.sentinel | ternary "" "env -u REDISCLI_AUTH " -}} redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel "$@"
+ else
+ {{ .Values.auth.sentinel | ternary "" "env -u REDISCLI_AUTH " -}} redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" sentinel "$@"
+ fi
+ }
+ sentinel_failover_finished() {
+ REDIS_SENTINEL_INFO=($(run_sentinel_command get-master-addr-by-name "{{ .Values.sentinel.masterSet }}"))
+ REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}"
+ [[ "$REDIS_MASTER_HOST" != "$(get_full_hostname $HOSTNAME)" ]]
+ }
+
+ REDIS_SERVICE="{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+
+ # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")"
+
+
+ if is_master && ! sentinel_failover_finished; then
+ echo "I am the master pod and you are stopping me. Pausing client connections."
+ # Pausing client write connections to avoid data loss
+ run_redis_command CLIENT PAUSE "{{ mul (add 2 (sub .Values.sentinel.terminationGracePeriodSeconds 10)) 1000 }}" WRITE
+
+ echo "Issuing failover"
+ # if I am the master, issue a command to failover once
+ run_sentinel_command failover "{{ .Values.sentinel.masterSet }}"
+
+ {{- if .Values.sentinel.redisShutdownWaitFailover }}
+ echo "Waiting for sentinel to complete failover for up to {{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}s"
+ retry_while "sentinel_failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1
+ {{- end }}
+ else
+ exit 0
+ fi
+
+{{- else }}
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ {{- if and .Values.master.containerSecurityContext.runAsUser (eq (.Values.master.containerSecurityContext.runAsUser | int) 0) }}
+ useradd redis
+ chown -R redis {{ .Values.master.persistence.path }}
+ {{- end }}
+ if [[ -f /opt/bitnami/redis/mounted-etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ {{- if .Values.tls.enabled }}
+ ARGS=("--port" "0")
+ ARGS+=("--tls-port" "${REDIS_TLS_PORT}")
+ ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}")
+ ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}")
+ ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}")
+ ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}")
+ {{- if .Values.tls.dhParamsFilename }}
+ ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}")
+ {{- end }}
+ {{- else }}
+ ARGS=("--port" "${REDIS_PORT}")
+ {{- end }}
+ {{- if .Values.auth.enabled }}
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ {{- else }}
+ ARGS+=("--protected-mode" "no")
+ {{- end }}
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ {{- if .Values.master.extraFlags }}
+ {{- range .Values.master.extraFlags }}
+ ARGS+=({{ . | quote }})
+ {{- end }}
+ {{- end }}
+ {{- if .Values.master.preExecCmds }}
+ {{ .Values.master.preExecCmds | nindent 4 }}
+ {{- end }}
+ {{- if .Values.master.command }}
+ exec {{ .Values.master.command }} "${ARGS[@]}"
+ {{- else }}
+ exec redis-server "${ARGS[@]}"
+ {{- end }}
+ {{- if eq .Values.architecture "replication" }}
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo {{ .Values.sentinel.containerPorts.sentinel }}
+ ;;
+ "REDIS")
+ echo {{ .Values.master.containerPorts.redis }}
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+
+ {{- if .Values.useExternalDNS.enabled }}
+ echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}"
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ echo "${hostname}.{{- .Release.Namespace }}"
+ {{- else }}
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ {{- end }}
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }}
+ useradd redis
+ chown -R redis {{ .Values.replica.persistence.path }}
+ {{- end }}
+ if [[ -f /opt/bitnami/redis/mounted-etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+
+ {{- if .Values.tls.enabled }}
+ ARGS=("--port" "0")
+ ARGS+=("--tls-port" "${REDIS_TLS_PORT}")
+ ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}")
+ ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}")
+ ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}")
+ ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}")
+ ARGS+=("--tls-replication" "yes")
+ {{- if .Values.tls.dhParamsFilename }}
+ ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}")
+ {{- end }}
+ {{- else }}
+ ARGS=("--port" "${REDIS_PORT}")
+ {{- end }}
+ ARGS+=("--replicaof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ {{- if .Values.auth.enabled }}
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ {{- else }}
+ ARGS+=("--protected-mode" "no")
+ {{- end }}
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ {{- if .Values.replica.extraFlags }}
+ {{- range .Values.replica.extraFlags }}
+ ARGS+=({{ . | quote }})
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.preExecCmds }}
+ {{ .Values.replica.preExecCmds | nindent 4 }}
+ {{- end }}
+ {{- if .Values.replica.command }}
+ exec {{ .Values.replica.command }} "${ARGS[@]}"
+ {{- else }}
+ exec redis-server "${ARGS[@]}"
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/secret.yaml b/charts/penpot/charts/redis/templates/secret.yaml
new file mode 100644
index 0000000..2edc0d8
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/secret.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.secretAnnotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.secretAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.secretAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+type: Opaque
+data:
+ redis-password: {{ include "redis.password" . | b64enc | quote }}
+{{- end -}}
diff --git a/charts/penpot/charts/redis/templates/sentinel/hpa.yaml b/charts/penpot/charts/redis/templates/sentinel/hpa.yaml
new file mode 100644
index 0000000..e1b765e
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/sentinel/hpa.yaml
@@ -0,0 +1,47 @@
+{{- if and .Values.replica.autoscaling.enabled .Values.sentinel.enabled }}
+apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ printf "%s-node" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: replica
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ scaleTargetRef:
+ apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+ kind: StatefulSet
+ name: {{ printf "%s-node" (include "common.names.fullname" .) }}
+ minReplicas: {{ .Values.replica.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.replica.autoscaling.targetMemory }}
+ - type: Resource
+ resource:
+ name: memory
+ {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
+ targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }}
+ {{- else }}
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.replica.autoscaling.targetMemory }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.autoscaling.targetCPU }}
+ - type: Resource
+ resource:
+ name: cpu
+ {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
+ targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }}
+ {{- else }}
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.replica.autoscaling.targetCPU }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/sentinel/node-services.yaml b/charts/penpot/charts/redis/templates/sentinel/node-services.yaml
new file mode 100644
index 0000000..d3e635e
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/sentinel/node-services.yaml
@@ -0,0 +1,70 @@
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (or .Release.IsUpgrade .Values.sentinel.service.nodePorts.redis ) }}
+
+{{- range $i := until (int .Values.replica.replicaCount) }}
+
+{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" $ ) "ports-configmap")).data }}
+
+{{ $sentinelport := 0}}
+{{ $redisport := 0}}
+{{- if $portsmap }}
+{{ $sentinelport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "sentinel") }}
+{{ $redisport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "redis") }}
+{{- else }}
+{{- end }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "common.names.fullname" $ }}-node-{{ $i }}
+ namespace: {{ $.Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" $ | nindent 4 }}
+ app.kubernetes.io/component: node
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or $.Values.sentinel.service.annotations $.Values.commonAnnotations }}
+ annotations:
+ {{- if $.Values.sentinel.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.sentinel.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: NodePort
+ ports:
+ - name: sentinel
+ {{- if $.Values.sentinel.service.nodePorts.sentinel }}
+ nodePort: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }}
+ port: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }}
+ {{- else }}
+ nodePort: {{ $sentinelport }}
+ port: {{ $sentinelport }}
+ {{- end }}
+ protocol: TCP
+ targetPort: {{ $.Values.sentinel.containerPorts.sentinel }}
+ - name: redis
+ {{- if $.Values.sentinel.service.nodePorts.redis }}
+ nodePort: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }}
+ port: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }}
+ {{- else }}
+ nodePort: {{ $redisport }}
+ port: {{ $redisport }}
+ {{- end }}
+ protocol: TCP
+ targetPort: {{ $.Values.replica.containerPorts.redis }}
+ - name: sentinel-internal
+ nodePort: null
+ port: {{ $.Values.sentinel.containerPorts.sentinel }}
+ protocol: TCP
+ targetPort: {{ $.Values.sentinel.containerPorts.sentinel }}
+ - name: redis-internal
+ nodePort: null
+ port: {{ $.Values.replica.containerPorts.redis }}
+ protocol: TCP
+ targetPort: {{ $.Values.replica.containerPorts.redis }}
+ selector:
+ statefulset.kubernetes.io/pod-name: {{ template "common.names.fullname" $ }}-node-{{ $i }}
+---
+{{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/sentinel/ports-configmap.yaml b/charts/penpot/charts/redis/templates/sentinel/ports-configmap.yaml
new file mode 100644
index 0000000..f5e7b2a
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/sentinel/ports-configmap.yaml
@@ -0,0 +1,100 @@
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Values.sentinel.service.nodePorts.redis ) }}
+{{- /* create a list to keep track of ports we choose to use */}}
+{{ $chosenports := (list ) }}
+
+{{- /* Get list of all used nodeports */}}
+{{ $usedports := (list ) }}
+{{- range $index, $service := (lookup "v1" "Service" "" "").items }}
+ {{- range .spec.ports }}
+ {{- if .nodePort }}
+ {{- $usedports = (append $usedports .nodePort) }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- /*
+comments that start with # are rendered in the output when you debug, so you can less and search for them
+Vars in the comment will be rendered out, so you can check their value this way.
+https://helm.sh/docs/chart_best_practices/templates/#comments-yaml-comments-vs-template-comments
+
+remove the template comments and leave the yaml comments to help debug
+*/}}
+
+{{- /* Sort the list */}}
+{{ $usedports = $usedports | sortAlpha }}
+#usedports {{ $usedports }}
+
+{{- /* How many nodeports per service do we want to create, except for the main service which is always two */}}
+{{ $numberofPortsPerNodeService := 2 }}
+
+{{- /* for every nodeport we want, loop though the used ports to get an unused port */}}
+{{- range $j := until (int (add (mul (int .Values.replica.replicaCount) $numberofPortsPerNodeService) 2)) }}
+ {{- /* #j={{ $j }} */}}
+ {{- $nodeport := (add $j 30000) }}
+ {{- $nodeportfound := false }}
+ {{- range $i := $usedports }}
+ {{- /* #i={{ $i }}
+ #nodeport={{ $nodeport }}
+ #usedports={{ $usedports }} */}}
+ {{- if and (has (toString $nodeport) $usedports) (eq $nodeportfound false) }}
+ {{- /* nodeport conflicts with in use */}}
+ {{- $nodeport = (add $nodeport 1) }}
+ {{- else if and ( has $nodeport $chosenports) (eq $nodeportfound false) }}
+ {{- /* nodeport already chosen, try another */}}
+ {{- $nodeport = (add $nodeport 1) }}
+ {{- else if (eq $nodeportfound false) }}
+ {{- /* nodeport free to use: not already claimed and not in use */}}
+ {{- /* select nodeport, and place into usedports */}}
+ {{- $chosenports = (append $chosenports $nodeport) }}
+ {{- $nodeportfound = true }}
+ {{- else }}
+ {{- /* nodeport has already been chosen and locked in, just work through the rest of the list to get to the next nodeport selection */}}
+ {{- end }}
+ {{- end }}
+ {{- if (eq $nodeportfound false) }}
+ {{- $chosenports = (append $chosenports $nodeport) }}
+ {{- end }}
+
+{{- end }}
+
+{{- /* print the usedports and chosenports for debugging */}}
+#usedports {{ $usedports }}
+#chosenports {{ $chosenports }}
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "common.names.fullname" . }}-ports-configmap
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations:
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }}
+{{- if $portsmap }}
+{{- /* configmap already exists, do not install again */ -}}
+ {{- range $name, $value := $portsmap }}
+ "{{ $name }}": "{{ $value }}"
+ {{- end }}
+{{- else }}
+{{- /* configmap being set for first time */ -}}
+ {{- range $index, $port := $chosenports }}
+ {{- $nodenumber := (floor (div $index 2)) }}
+ {{- if (eq $index 0) }}
+ "{{ template "common.names.fullname" $ }}-sentinel": "{{ $port }}"
+ {{- else if (eq $index 1) }}
+ "{{ template "common.names.fullname" $ }}-redis": "{{ $port }}"
+ {{- else if (eq (mod $index 2) 0) }}
+ "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-sentinel": "{{ $port }}"
+ {{- else if (eq (mod $index 2) 1) }}
+ "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-redis": "{{ $port }}"
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/sentinel/service.yaml b/charts/penpot/charts/redis/templates/sentinel/service.yaml
new file mode 100644
index 0000000..362d681
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/sentinel/service.yaml
@@ -0,0 +1,103 @@
+{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}}
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }}
+{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }}
+
+{{ $sentinelport := 0}}
+{{ $redisport := 0}}
+{{- if $portsmap }}
+{{ $sentinelport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "sentinel") }}
+{{ $redisport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "redis") }}
+{{- else }}
+{{- end }}
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: node
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.sentinel.service.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.sentinel.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.sentinel.service.type }}
+ {{- if or (eq .Values.sentinel.service.type "LoadBalancer") (eq .Values.sentinel.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.sentinel.service.type "LoadBalancer") (not (empty .Values.sentinel.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.sentinel.service.type "LoadBalancer") (not (empty .Values.sentinel.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{ .Values.sentinel.service.loadBalancerSourceRanges }}
+ {{- end }}
+ {{- if and .Values.sentinel.service.clusterIP (eq .Values.sentinel.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.sentinel.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.sentinel.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.sentinel.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.sentinel.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: tcp-redis
+ {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }}
+ port: {{ .Values.sentinel.service.nodePorts.redis }}
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ port: {{ $redisport }}
+ {{- else}}
+ port: {{ .Values.sentinel.service.ports.redis }}
+ {{- end }}
+ targetPort: {{ .Values.replica.containerPorts.redis }}
+ {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }}
+ nodePort: {{ .Values.sentinel.service.nodePorts.redis }}
+ {{- else if eq .Values.sentinel.service.type "ClusterIP" }}
+ nodePort: null
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ nodePort: {{ $redisport }}
+ {{- end }}
+ - name: tcp-sentinel
+ {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }}
+ port: {{ .Values.sentinel.service.nodePorts.sentinel }}
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ port: {{ $sentinelport }}
+ {{- else }}
+ port: {{ .Values.sentinel.service.ports.sentinel }}
+ {{- end }}
+ targetPort: {{ .Values.sentinel.containerPorts.sentinel }}
+ {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }}
+ nodePort: {{ .Values.sentinel.service.nodePorts.sentinel }}
+ {{- else if eq .Values.sentinel.service.type "ClusterIP" }}
+ nodePort: null
+ {{- else if eq .Values.sentinel.service.type "NodePort" }}
+ nodePort: {{ $sentinelport }}
+ {{- end }}
+ {{- if eq .Values.sentinel.service.type "NodePort" }}
+ - name: sentinel-internal
+ nodePort: null
+ port: {{ .Values.sentinel.containerPorts.sentinel }}
+ protocol: TCP
+ targetPort: {{ .Values.sentinel.containerPorts.sentinel }}
+ - name: redis-internal
+ nodePort: null
+ port: {{ .Values.replica.containerPorts.redis }}
+ protocol: TCP
+ targetPort: {{ .Values.replica.containerPorts.redis }}
+ {{- end }}
+ {{- if .Values.sentinel.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: node
+{{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/sentinel/statefulset.yaml b/charts/penpot/charts/redis/templates/sentinel/statefulset.yaml
new file mode 100644
index 0000000..205d559
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/sentinel/statefulset.yaml
@@ -0,0 +1,774 @@
+{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}}
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ printf "%s-node" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: node
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.replica.replicaCount }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: node
+ serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }}
+ {{- if .Values.replica.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }}
+ {{- end }}
+ {{- if and .Values.replica.minReadySeconds (semverCompare ">= 1.25" (include "common.capabilities.kubeVersion" .)) }}
+ minReadySeconds: {{ .Values.replica.minReadySeconds }}
+ {{- end }}
+ {{- if .Values.replica.podManagementPolicy }}
+ podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }}
+ {{- end }}
+ template:
+ metadata:
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/component: node
+ {{- if .Values.replica.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- if (include "redis.createConfigmap" .) }}
+ checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ {{- end }}
+ checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }}
+ checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }}
+ checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+ {{- if .Values.replica.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- include "redis.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.replica.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "redis.serviceAccountName" . }}
+ {{- if .Values.replica.priorityClassName }}
+ priorityClassName: {{ .Values.replica.priorityClassName | quote }}
+ {{- end }}
+ {{- if .Values.replica.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "node" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "node" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.replica.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.replica.shareProcessNamespace }}
+ shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }}
+ {{- end }}
+ {{- if .Values.replica.schedulerName }}
+ schedulerName: {{ .Values.replica.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.replica.dnsPolicy }}
+ dnsPolicy: {{ .Values.replica.dnsPolicy }}
+ {{- end }}
+ {{- if .Values.replica.dnsConfig }}
+ dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ .Values.sentinel.terminationGracePeriodSeconds }}
+ containers:
+ - name: redis
+ image: {{ template "redis.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.replica.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }}
+ {{- else }}
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -c
+ - /opt/bitnami/scripts/start-scripts/prestop-redis.sh
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.replica.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }}
+ {{- else }}
+ command:
+ - /bin/bash
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.replica.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }}
+ {{- else }}
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-node.sh
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: {{ .Values.replica.containerPorts.redis | quote }}
+ - name: ALLOW_EMPTY_PASSWORD
+ value: {{ ternary "no" "yes" .Values.auth.enabled | quote }}
+ {{- if .Values.auth.enabled }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: REDIS_PASSWORD_FILE
+ value: "/opt/bitnami/redis/secrets/redis-password"
+ - name: REDIS_MASTER_PASSWORD_FILE
+ value: "/opt/bitnami/redis/secrets/redis-password"
+ {{- else }}
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ - name: REDIS_TLS_ENABLED
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_TLS_PORT
+ value: {{ .Values.replica.containerPorts.redis | quote }}
+ - name: REDIS_TLS_AUTH_CLIENTS
+ value: {{ ternary "yes" "no" .Values.tls.authClients | quote }}
+ - name: REDIS_TLS_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ - name: REDIS_TLS_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_TLS_CA_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- if .Values.tls.dhParamsFilename }}
+ - name: REDIS_TLS_DH_PARAMS_FILE
+ value: {{ template "redis.tlsDHParams" . }}
+ {{- end }}
+ {{- else }}
+ - name: REDIS_PORT
+ value: {{ .Values.replica.containerPorts.redis | quote }}
+ {{- end }}
+ - name: REDIS_SENTINEL_TLS_ENABLED
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_SENTINEL_TLS_PORT_NUMBER
+ value: {{ .Values.sentinel.containerPorts.sentinel | quote }}
+ - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS
+ value: {{ ternary "yes" "no" .Values.tls.authClients | quote }}
+ - name: REDIS_SENTINEL_TLS_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ - name: REDIS_SENTINEL_TLS_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_SENTINEL_TLS_CA_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- if .Values.tls.dhParamsFilename }}
+ - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE
+ value: {{ template "redis.tlsDHParams" . }}
+ {{- end }}
+ {{- else }}
+ - name: REDIS_SENTINEL_PORT
+ value: {{ .Values.sentinel.containerPorts.sentinel | quote }}
+ {{- end }}
+ - name: REDIS_DATA_DIR
+ value: {{ .Values.replica.persistence.path }}
+ {{- if .Values.replica.externalMaster.enabled }}
+ - name: REDIS_EXTERNAL_MASTER_HOST
+ value: {{ .Values.replica.externalMaster.host | quote }}
+ - name: REDIS_EXTERNAL_MASTER_PORT
+ value: {{ .Values.replica.externalMaster.port | quote }}
+ {{- end }}
+ {{- if .Values.replica.extraEnvVars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraEnvVars "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.replica.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.replica.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.replica.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.replica.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: redis
+ containerPort: {{ .Values.replica.containerPorts.redis }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.replica.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.replica.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- if .Values.replica.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.replica.livenessProbe.enabled }}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.replica.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.replica.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- if .Values.replica.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.replica.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.replica.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.replica.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh {{ .Values.replica.readinessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.resources }}
+ resources: {{- toYaml .Values.replica.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ {{- if .Values.sentinel.persistence.enabled }}
+ - name: sentinel-data
+ mountPath: /opt/bitnami/redis-sentinel/etc
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ mountPath: /opt/bitnami/redis/secrets/
+ {{- end }}
+ - name: redis-data
+ mountPath: {{ .Values.replica.persistence.path }}
+ {{- if .Values.replica.persistence.subPath }}
+ subPath: {{ .Values.replica.persistence.subPath }}
+ {{- else if .Values.replica.persistence.subPathExpr }}
+ subPathExpr: {{ .Values.replica.persistence.subPathExpr }}
+ {{- end }}
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ - name: tmp
+ mountPath: /tmp
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.replica.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ - name: sentinel
+ image: {{ template "redis.sentinel.image" . }}
+ imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.sentinel.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.lifecycleHooks "context" $) | nindent 12 }}
+ {{- else }}
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -c
+ - /opt/bitnami/scripts/start-scripts/prestop-sentinel.sh
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sentinel.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.sentinel.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.sentinel.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.command "context" $) | nindent 12 }}
+ {{- else }}
+ command:
+ - /bin/bash
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.sentinel.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.args "context" $) | nindent 12 }}
+ {{- else }}
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-sentinel.sh
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.sentinel.image.debug .Values.diagnosticMode.enabled) | quote }}
+ {{- if .Values.auth.enabled }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: REDIS_PASSWORD_FILE
+ value: "/opt/bitnami/redis/secrets/redis-password"
+ {{- else }}
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ {{- end }}
+ {{- else }}
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "yes"
+ {{- end }}
+ - name: REDIS_SENTINEL_TLS_ENABLED
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_SENTINEL_TLS_PORT_NUMBER
+ value: {{ .Values.sentinel.containerPorts.sentinel | quote }}
+ - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS
+ value: {{ ternary "yes" "no" .Values.tls.authClients | quote }}
+ - name: REDIS_SENTINEL_TLS_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ - name: REDIS_SENTINEL_TLS_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_SENTINEL_TLS_CA_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- if .Values.tls.dhParamsFilename }}
+ - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE
+ value: {{ template "redis.tlsDHParams" . }}
+ {{- end }}
+ {{- else }}
+ - name: REDIS_SENTINEL_PORT
+ value: {{ .Values.sentinel.containerPorts.sentinel | quote }}
+ {{- end }}
+ {{- if .Values.sentinel.externalMaster.enabled }}
+ - name: REDIS_EXTERNAL_MASTER_HOST
+ value: {{ .Values.sentinel.externalMaster.host | quote }}
+ - name: REDIS_EXTERNAL_MASTER_PORT
+ value: {{ .Values.sentinel.externalMaster.port | quote }}
+ {{- end }}
+ {{- if .Values.sentinel.extraEnvVars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraEnvVars "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.sentinel.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.sentinel.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.sentinel.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.sentinel.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: redis-sentinel
+ containerPort: {{ .Values.sentinel.containerPorts.sentinel }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.sentinel.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.sentinel.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.sentinel.startupProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- if .Values.sentinel.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.sentinel.livenessProbe.enabled }}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- end }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.sentinel.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.sentinel.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }}
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_sentinel.sh {{ .Values.sentinel.readinessProbe.timeoutSeconds }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sentinel.resources }}
+ resources: {{- toYaml .Values.sentinel.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: sentinel-data
+ mountPath: /opt/bitnami/redis-sentinel/etc
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ mountPath: /opt/bitnami/redis/secrets/
+ {{- end }}
+ - name: redis-data
+ mountPath: {{ .Values.replica.persistence.path }}
+ {{- if .Values.replica.persistence.subPath }}
+ subPath: {{ .Values.replica.persistence.subPath }}
+ {{- else if .Values.replica.persistence.subPathExpr }}
+ subPathExpr: {{ .Values.replica.persistence.subPathExpr }}
+ {{- end }}
+ - name: config
+ mountPath: /opt/bitnami/redis-sentinel/mounted-etc
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.sentinel.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ template "redis.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else }}
+ command:
+ - /bin/bash
+ - -c
+ - |
+ if [[ -f '/secrets/redis-password' ]]; then
+ export REDIS_PASSWORD=$(cat /secrets/redis-password)
+ fi
+ redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: REDIS_ALIAS
+ value: {{ template "common.names.fullname" . }}
+ {{- if .Values.auth.enabled }}
+ - name: REDIS_USER
+ value: default
+ {{- if (not .Values.auth.usePasswordFiles) }}
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "redis.secretName" . }}
+ key: {{ template "redis.secretPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: REDIS_ADDR
+ value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }}
+ {{- if .Values.tls.authClients }}
+ - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE
+ value: {{ template "redis.tlsCertKey" . }}
+ - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE
+ value: {{ template "redis.tlsCert" . }}
+ {{- end }}
+ - name: REDIS_EXPORTER_TLS_CA_CERT_FILE
+ value: {{ template "redis.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: 9121
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: metrics
+ {{- end }}
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ mountPath: /secrets/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replica.sidecars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }}
+ {{- end }}
+ {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }}
+ {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }}
+ initContainers:
+ {{- if .Values.replica.initContainers }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if $needsVolumePermissions }}
+ - name: volume-permissions
+ image: {{ include "redis.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }}
+ {{- else }}
+ chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: redis-data
+ mountPath: {{ .Values.replica.persistence.path }}
+ {{- if .Values.replica.persistence.subPath }}
+ subPath: {{ .Values.replica.persistence.subPath }}
+ {{- else if .Values.replica.persistence.subPathExpr }}
+ subPathExpr: {{ .Values.replica.persistence.subPathExpr }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.sysctl.enabled }}
+ - name: init-sysctl
+ image: {{ include "redis.sysctl.image" . }}
+ imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }}
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ {{- if .Values.sysctl.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.sysctl.resources }}
+ resources: {{- toYaml .Values.sysctl.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.sysctl.mountHostSys }}
+ volumeMounts:
+ - name: host-sys
+ mountPath: /host-sys
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: {{ printf "%s-health" (include "common.names.fullname" .) }}
+ defaultMode: 0755
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: redis-password
+ secret:
+ secretName: {{ template "redis.secretName" . }}
+ items:
+ - key: {{ template "redis.secretPasswordKey" . }}
+ path: redis-password
+ {{- end }}
+ - name: config
+ configMap:
+ name: {{ include "redis.configmapName" . }}
+ {{- if .Values.sysctl.mountHostSys }}
+ - name: host-sys
+ hostPath:
+ path: /sys
+ {{- end }}
+ {{- if not .Values.sentinel.persistence.enabled }}
+ - name: sentinel-data
+ {{- if or .Values.sentinel.persistence.medium .Values.sentinel.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.sentinel.persistence.medium }}
+ medium: {{ .Values.sentinel.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.sentinel.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.sentinel.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- end }}
+ - name: redis-tmp-conf
+ {{- if or .Values.sentinel.persistence.medium .Values.sentinel.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.sentinel.persistence.medium }}
+ medium: {{ .Values.sentinel.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.sentinel.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.sentinel.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - name: tmp
+ {{- if or .Values.sentinel.persistence.medium .Values.sentinel.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.sentinel.persistence.medium }}
+ medium: {{ .Values.sentinel.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.sentinel.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.sentinel.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.replica.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.metrics.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.sentinel.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ secret:
+ secretName: {{ include "redis.tlsSecretName" . }}
+ defaultMode: 256
+ {{- end }}
+ {{- if not .Values.replica.persistence.enabled }}
+ - name: redis-data
+ {{- if or .Values.sentinel.persistence.medium .Values.sentinel.persistence.sizeLimit }}
+ emptyDir:
+ {{- if .Values.sentinel.persistence.medium }}
+ medium: {{ .Values.sentinel.persistence.medium | quote }}
+ {{- end }}
+ {{- if .Values.sentinel.persistence.sizeLimit }}
+ sizeLimit: {{ .Values.sentinel.persistence.sizeLimit | quote }}
+ {{- end }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- else if .Values.replica.persistence.existingClaim }}
+ - name: redis-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s" (tpl .Values.replica.persistence.existingClaim .) }}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: redis-data
+ labels: {{- include "common.labels.matchLabels" . | nindent 10 }}
+ app.kubernetes.io/component: node
+ {{- if .Values.replica.persistence.annotations }}
+ annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.replica.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.replica.persistence.size | quote }}
+ {{- if .Values.replica.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" ( dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }}
+ {{- if .Values.sentinel.persistence.enabled }}
+ - metadata:
+ name: sentinel-data
+ labels: {{- include "common.labels.matchLabels" . | nindent 10 }}
+ app.kubernetes.io/component: node
+ {{- if .Values.sentinel.persistence.annotations }}
+ annotations: {{- toYaml .Values.sentinel.persistence.annotations | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.sentinel.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.sentinel.persistence.size | quote }}
+ {{- if .Values.sentinel.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.sentinel.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.sentinel.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/serviceaccount.yaml b/charts/penpot/charts/redis/templates/serviceaccount.yaml
new file mode 100644
index 0000000..9faa175
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/serviceaccount.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.serviceAccount.create (and (not .Values.master.serviceAccount.create) (not .Values.replica.serviceAccount.create)) }}
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+metadata:
+ name: {{ template "redis.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.serviceAccount.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/servicemonitor.yaml b/charts/penpot/charts/redis/templates/servicemonitor.yaml
new file mode 100644
index 0000000..9bdad94
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/servicemonitor.yaml
@@ -0,0 +1,44 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ template "common.names.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.metrics.serviceMonitor.additionalLabels }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ endpoints:
+ - port: http-metrics
+ {{- if .Values.metrics.serviceMonitor.interval }}
+ interval: {{ .Values.metrics.serviceMonitor.interval }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+ scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.honorLabels }}
+ honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.relabellings }}
+ relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.podTargetLabels }}
+ podTargetLabels: {{- toYaml .Values.metrics.serviceMonitor.podTargetLabels | nindent 4 }}
+ {{- end }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: metrics
+{{- end }}
diff --git a/charts/penpot/charts/redis/templates/tls-secret.yaml b/charts/penpot/charts/redis/templates/tls-secret.yaml
new file mode 100644
index 0000000..4f9c39b
--- /dev/null
+++ b/charts/penpot/charts/redis/templates/tls-secret.yaml
@@ -0,0 +1,30 @@
+{{- if (include "redis.createTlsSecret" .) }}
+{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }}
+{{- $existingCerts := (lookup "v1" "Secret" .Release.Namespace $secretName).data | default dict }}
+{{- $ca := genCA "redis-ca" 365 }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $serviceName := include "common.names.fullname" . }}
+{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }}
+{{- $masterServiceName := printf "%s-master" (include "common.names.fullname" .) }}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $masterServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $masterServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }}
+{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secretName }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ ca.crt: {{ (get $existingCerts "ca.crt") | default ($ca.Cert | b64enc | quote ) }}
+ tls.crt: {{ (get $existingCerts "tls.crt") | default ($crt.Cert | b64enc | quote) }}
+ tls.key: {{ (get $existingCerts "tls.key") | default ($crt.Key | b64enc | quote) }}
+{{- end }}
diff --git a/charts/penpot/charts/redis/values.schema.json b/charts/penpot/charts/redis/values.schema.json
new file mode 100644
index 0000000..d6e226b
--- /dev/null
+++ b/charts/penpot/charts/redis/values.schema.json
@@ -0,0 +1,156 @@
+{
+ "$schema": "http://json-schema.org/schema#",
+ "type": "object",
+ "properties": {
+ "architecture": {
+ "type": "string",
+ "title": "Redis architecture",
+ "form": true,
+ "description": "Allowed values: `standalone` or `replication`",
+ "enum": ["standalone", "replication"]
+ },
+ "auth": {
+ "type": "object",
+ "title": "Authentication configuration",
+ "form": true,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "form": true,
+ "title": "Use password authentication"
+ },
+ "password": {
+ "type": "string",
+ "title": "Redis password",
+ "form": true,
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "hidden": {
+ "value": false,
+ "path": "auth/enabled"
+ }
+ }
+ }
+ },
+ "master": {
+ "type": "object",
+ "title": "Master replicas settings",
+ "form": true,
+ "properties": {
+ "kind": {
+ "type": "string",
+ "title": "Workload Kind",
+ "form": true,
+ "description": "Allowed values: `Deployment` or `StatefulSet`",
+ "enum": ["Deployment", "StatefulSet"]
+ },
+ "persistence": {
+ "type": "object",
+ "title": "Persistence for master replicas",
+ "form": true,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "form": true,
+ "title": "Enable persistence",
+ "description": "Enable persistence using Persistent Volume Claims"
+ },
+ "size": {
+ "type": "string",
+ "title": "Persistent Volume Size",
+ "form": true,
+ "render": "slider",
+ "sliderMin": 1,
+ "sliderMax": 100,
+ "sliderUnit": "Gi",
+ "hidden": {
+ "value": false,
+ "path": "master/persistence/enabled"
+ }
+ }
+ }
+ }
+ }
+ },
+ "replica": {
+ "type": "object",
+ "title": "Redis replicas settings",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ },
+ "properties": {
+ "replicaCount": {
+ "type": "integer",
+ "form": true,
+ "title": "Number of Redis replicas"
+ },
+ "persistence": {
+ "type": "object",
+ "title": "Persistence for Redis replicas",
+ "form": true,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "form": true,
+ "title": "Enable persistence",
+ "description": "Enable persistence using Persistent Volume Claims"
+ },
+ "size": {
+ "type": "string",
+ "title": "Persistent Volume Size",
+ "form": true,
+ "render": "slider",
+ "sliderMin": 1,
+ "sliderMax": 100,
+ "sliderUnit": "Gi",
+ "hidden": {
+ "value": false,
+ "path": "replica/persistence/enabled"
+ }
+ }
+ }
+ }
+ }
+ },
+ "volumePermissions": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "form": true,
+ "title": "Enable Init Containers",
+ "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination"
+ }
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "form": true,
+ "title": "Prometheus metrics details",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Create Prometheus metrics exporter",
+ "description": "Create a side-car container to expose Prometheus metrics",
+ "form": true
+ },
+ "serviceMonitor": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Create Prometheus Operator ServiceMonitor",
+ "description": "Create a ServiceMonitor to track metrics using Prometheus Operator",
+ "form": true,
+ "hidden": {
+ "value": false,
+ "path": "metrics/enabled"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/charts/penpot/charts/redis/values.yaml b/charts/penpot/charts/redis/values.yaml
new file mode 100644
index 0000000..2bbc7e4
--- /dev/null
+++ b/charts/penpot/charts/redis/values.yaml
@@ -0,0 +1,1743 @@
+## @section Global parameters
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+## @param global.redis.password Global Redis® password (overrides `auth.password`)
+##
+global:
+ imageRegistry: ""
+ ## E.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+ storageClass: ""
+ redis:
+ password: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname
+##
+fullnameOverride: ""
+## @param commonLabels Labels to add to all deployed objects
+##
+commonLabels: {}
+## @param commonAnnotations Annotations to add to all deployed objects
+##
+commonAnnotations: {}
+## @param secretAnnotations Annotations to add to secret
+##
+secretAnnotations: {}
+## @param clusterDomain Kubernetes cluster domain name
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
+
+## Enable diagnostic mode in the deployment
+##
+diagnosticMode:
+ ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+ ##
+ enabled: false
+ ## @param diagnosticMode.command Command to override all containers in the deployment
+ ##
+ command:
+ - sleep
+ ## @param diagnosticMode.args Args to override all containers in the deployment
+ ##
+ args:
+ - infinity
+
+## @section Redis® Image parameters
+##
+
+## Bitnami Redis® image
+## ref: https://hub.docker.com/r/bitnami/redis/tags/
+## @param image.registry Redis® image registry
+## @param image.repository Redis® image repository
+## @param image.tag Redis® image tag (immutable tags are recommended)
+## @param image.digest Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy Redis® image pull policy
+## @param image.pullSecrets Redis® image pull secrets
+## @param image.debug Enable image debug mode
+##
+image:
+ registry: docker.io
+ repository: bitnami/redis
+ tag: 7.0.8-debian-11-r0
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Enable debug mode
+ ##
+ debug: false
+
+## @section Redis® common configuration parameters
+## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration
+##
+
+## @param architecture Redis® architecture. Allowed values: `standalone` or `replication`
+##
+architecture: replication
+## Redis® Authentication parameters
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run
+##
+auth:
+ ## @param auth.enabled Enable password authentication
+ ##
+ enabled: true
+ ## @param auth.sentinel Enable password authentication on sentinels too
+ ##
+ sentinel: true
+ ## @param auth.password Redis® password
+ ## Defaults to a random 10-character alphanumeric string if not set
+ ##
+ password: ""
+ ## @param auth.existingSecret The name of an existing secret with Redis® credentials
+ ## NOTE: When it's set, the previous `auth.password` parameter is ignored
+ ##
+ existingSecret: ""
+ ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret
+ ## NOTE: ignored unless `auth.existingSecret` parameter is set
+ ##
+ existingSecretPasswordKey: ""
+ ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable
+ ##
+ usePasswordFiles: false
+
+## @param commonConfiguration [string] Common configuration to be added into the ConfigMap
+## ref: https://redis.io/topics/config
+##
+commonConfiguration: |-
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes
+##
+existingConfigmap: ""
+
+## @section Redis® master configuration parameters
+##
+
+master:
+ ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration)
+ ##
+ count: 1
+ ## @param master.configuration Configuration for Redis® master nodes
+ ## ref: https://redis.io/topics/config
+ ##
+ configuration: ""
+ ## @param master.disableCommands Array with Redis® commands to disable on master nodes
+ ## Commands will be completely disabled by renaming each to an empty string.
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands
+ ##
+ disableCommands:
+ - FLUSHDB
+ - FLUSHALL
+ ## @param master.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param master.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## @param master.preExecCmds Additional commands to run prior to starting Redis® master
+ ##
+ preExecCmds: []
+ ## @param master.extraFlags Array with additional command line flags for Redis® master
+ ## e.g:
+ ## extraFlags:
+ ## - "--maxmemory-policy volatile-ttl"
+ ## - "--repl-backlog-size 1024mb"
+ ##
+ extraFlags: []
+ ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param master.containerPorts.redis Container port to open on Redis® master nodes
+ ##
+ containerPorts:
+ redis: 6379
+ ## Configure extra options for Redis® containers' liveness and readiness probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes
+ ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param master.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param master.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes
+ ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes
+ ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param master.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## Redis® master resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param master.resources.limits The resources limits for the Redis® master containers
+ ## @param master.resources.requests The requested resources for the Redis® master containers
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Configure Pods Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context
+ ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context
+ ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param master.kind Use either Deployment or StatefulSet (default)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
+ ##
+ kind: StatefulSet
+ ## @param master.schedulerName Alternate scheduler for Redis® master pods
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param master.updateStrategy.type Redis® master statefulset strategy type
+ ## @skip master.updateStrategy.rollingUpdate
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ ## StrategyType
+ ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment)
+ ##
+ type: RollingUpdate
+ ## @param master.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update
+ ##
+ minReadySeconds: 0
+ ## @param master.priorityClassName Redis® master pods' priorityClassName
+ ##
+ priorityClassName: ""
+ ## @param master.hostAliases Redis® master pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param master.podLabels Extra labels for Redis® master pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ ##
+ podLabels: {}
+ ## @param master.podAnnotations Annotations for Redis® master pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations: {}
+ ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
+ ##
+ shareProcessNamespace: false
+ ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## Node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set
+ ##
+ key: ""
+ ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param master.affinity Affinity for Redis® master pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param master.nodeSelector Node labels for Redis® master pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param master.tolerations Tolerations for Redis® master pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## E.g.
+ ## topologySpreadConstraints:
+ ## - maxSkew: 1
+ ## topologyKey: node
+ ## whenUnsatisfiable: DoNotSchedule
+ ##
+ topologySpreadConstraints: []
+ ## @param master.dnsPolicy DNS Policy for Redis® master pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
+ ## E.g.
+ ## dnsPolicy: ClusterFirst
+ dnsPolicy: ""
+ ## @param master.dnsConfig DNS Configuration for Redis® master pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
+ ## E.g.
+ ## dnsConfig:
+ ## options:
+ ## - name: ndots
+ ## value: "4"
+ ## - name: single-request-reopen
+ dnsConfig: {}
+ ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s)
+ ##
+ extraVolumes: []
+ ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s)
+ ## e.g:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param master.initContainers Add additional init containers to the Redis® master pod(s)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ ## e.g:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## command: ['sh', '-c', 'echo "hello world"']
+ ##
+ initContainers: []
+ ## Persistence parameters
+ ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ persistence:
+ ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims
+ ##
+ enabled: true
+ ## @param master.persistence.medium Provide a medium for `emptyDir` volumes.
+ ##
+ medium: ""
+ ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
+ ##
+ sizeLimit: ""
+ ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers
+ ## NOTE: Useful when using different Redis® images
+ ##
+ path: /data
+ ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers
+ ## NOTE: Useful in dev environments
+ ##
+ subPath: ""
+ ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers
+ ##
+ subPathExpr: ""
+ ## @param master.persistence.storageClass Persistent Volume storage class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
+ ##
+ storageClass: ""
+ ## @param master.persistence.accessModes Persistent Volume access modes
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param master.persistence.size Persistent Volume size
+ ##
+ size: 8Gi
+ ## @param master.persistence.annotations Additional custom annotations for the PVC
+ ##
+ annotations: {}
+ ## @param master.persistence.selector Additional labels to match for the PVC
+ ## e.g:
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param master.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+ ## @param master.persistence.existingClaim Use an existing PVC which must be created manually before bound
+ ## NOTE: requires master.persistence.enabled: true
+ ##
+ existingClaim: ""
+ ## Redis® master service parameters
+ ##
+ service:
+ ## @param master.service.type Redis® master service type
+ ##
+ type: ClusterIP
+ ## @param master.service.ports.redis Redis® master service port
+ ##
+ ports:
+ redis: 6379
+ ## @param master.service.nodePorts.redis Node port for Redis® master
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ redis: ""
+ ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
+ ##
+ extraPorts: []
+ ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable)
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/
+ ##
+ internalTrafficPolicy: Cluster
+ ## @param master.service.clusterIP Redis® master service Cluster IP
+ ##
+ clusterIP: ""
+ ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g.
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param master.service.externalIPs Redis® master service External IPs
+ ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+ ## e.g.
+ ## externalIPs:
+ ## - 10.10.10.1
+ ## - 201.22.30.1
+ ##
+ externalIPs: []
+ ## @param master.service.annotations Additional custom annotations for Redis® master service
+ ##
+ annotations: {}
+ ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods
+ ##
+ terminationGracePeriodSeconds: 30
+ ## ServiceAccount configuration
+ ##
+ serviceAccount:
+ ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created
+ ##
+ create: false
+ ## @param master.serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
+ ##
+ automountServiceAccountToken: true
+ ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount
+ ##
+ annotations: {}
+
+## @section Redis® replicas configuration parameters
+##
+
+replica:
+ ## @param replica.replicaCount Number of Redis® replicas to deploy
+ ##
+ replicaCount: 3
+ ## @param replica.configuration Configuration for Redis® replicas nodes
+ ## ref: https://redis.io/topics/config
+ ##
+ configuration: ""
+ ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes
+ ## Commands will be completely disabled by renaming each to an empty string.
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands
+ ##
+ disableCommands:
+ - FLUSHDB
+ - FLUSHALL
+ ## @param replica.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param replica.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas
+ ##
+ preExecCmds: []
+ ## @param replica.extraFlags Array with additional command line flags for Redis® replicas
+ ## e.g:
+ ## extraFlags:
+ ## - "--maxmemory-policy volatile-ttl"
+ ## - "--repl-backlog-size 1024mb"
+ ##
+ extraFlags: []
+ ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param replica.externalMaster.enabled Use external master for bootstrapping
+ ## @param replica.externalMaster.host External master host to bootstrap from
+ ## @param replica.externalMaster.port Port for Redis service external master host
+ ##
+ externalMaster:
+ enabled: false
+ host: ""
+ port: 6379
+ ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes
+ ##
+ containerPorts:
+ redis: 6379
+ ## Configure extra options for Redis® containers' liveness and readiness probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes
+ ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param replica.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: true
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 22
+ ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes
+ ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes
+ ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param replica.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## Redis® replicas resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param replica.resources.limits The resources limits for the Redis® replicas containers
+ ## @param replica.resources.requests The requested resources for the Redis® replicas containers
+ ##
+ resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits: {}
+ # cpu: 250m
+ # memory: 256Mi
+ requests: {}
+ # cpu: 250m
+ # memory: 256Mi
+ ## Configure Pods Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context
+ ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context
+ ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type
+ ## @skip replica.updateStrategy.rollingUpdate
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ ## StrategyType
+ ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment)
+ ##
+ type: RollingUpdate
+ ## @param replica.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update
+ ##
+ minReadySeconds: 0
+ ## @param replica.priorityClassName Redis® replicas pods' priorityClassName
+ ##
+ priorityClassName: ""
+ ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis® replicas pods
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+ ##
+ podManagementPolicy: ""
+ ## @param replica.hostAliases Redis® replicas pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param replica.podLabels Extra labels for Redis® replicas pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ ##
+ podLabels: {}
+ ## @param replica.podAnnotations Annotations for Redis® replicas pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations: {}
+ ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
+ ##
+ shareProcessNamespace: false
+ ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## Node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set
+ ##
+ key: ""
+ ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param replica.affinity Affinity for Redis® replicas pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param replica.tolerations Tolerations for Redis® replicas pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## E.g.
+ ## topologySpreadConstraints:
+ ## - maxSkew: 1
+ ## topologyKey: node
+ ## whenUnsatisfiable: DoNotSchedule
+ ##
+ topologySpreadConstraints: []
+ ## @param replica.dnsPolicy DNS Policy for Redis® replica pods
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
+ ## E.g.
+ ## dnsPolicy: ClusterFirst
+ dnsPolicy: ""
+ ## @param replica.dnsConfig DNS Configuration for Redis® replica pods
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
+ ## E.g.
+ ## dnsConfig:
+ ## options:
+ ## - name: ndots
+ ## value: "4"
+ ## - name: single-request-reopen
+ dnsConfig: {}
+ ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s)
+ ##
+ extraVolumes: []
+ ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s)
+ ## e.g:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ ## e.g:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## command: ['sh', '-c', 'echo "hello world"']
+ ##
+ initContainers: []
+ ## Persistence Parameters
+ ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ persistence:
+ ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims
+ ##
+ enabled: true
+ ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes.
+ ##
+ medium: ""
+ ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
+ ##
+ sizeLimit: ""
+ ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers
+ ## NOTE: Useful when using different Redis® images
+ ##
+ path: /data
+ ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers
+ ## NOTE: Useful in dev environments
+ ##
+ subPath: ""
+ ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers
+ ##
+ subPathExpr: ""
+ ## @param replica.persistence.storageClass Persistent Volume storage class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
+ ##
+ storageClass: ""
+ ## @param replica.persistence.accessModes Persistent Volume access modes
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param replica.persistence.size Persistent Volume size
+ ##
+ size: 8Gi
+ ## @param replica.persistence.annotations Additional custom annotations for the PVC
+ ##
+ annotations: {}
+ ## @param replica.persistence.selector Additional labels to match for the PVC
+ ## e.g:
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param replica.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+ ## @param replica.persistence.existingClaim Use an existing PVC which must be created manually before bound
+ ## NOTE: requires replica.persistence.enabled: true
+ ##
+ existingClaim: ""
+ ## Redis® replicas service parameters
+ ##
+ service:
+ ## @param replica.service.type Redis® replicas service type
+ ##
+ type: ClusterIP
+ ## @param replica.service.ports.redis Redis® replicas service port
+ ##
+ ports:
+ redis: 6379
+ ## @param replica.service.nodePorts.redis Node port for Redis® replicas
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ redis: ""
+ ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable)
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/
+ ##
+ internalTrafficPolicy: Cluster
+ ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
+ ##
+ extraPorts: []
+ ## @param replica.service.clusterIP Redis® replicas service Cluster IP
+ ##
+ clusterIP: ""
+ ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g.
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param replica.service.annotations Additional custom annotations for Redis® replicas service
+ ##
+ annotations: {}
+ ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods
+ ##
+ terminationGracePeriodSeconds: 30
+ ## Autoscaling configuration
+ ##
+ autoscaling:
+ ## @param replica.autoscaling.enabled Enable replica autoscaling settings
+ ##
+ enabled: false
+ ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling
+ ##
+ minReplicas: 1
+ ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling
+ ##
+ maxReplicas: 11
+ ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling
+ ##
+ targetCPU: ""
+ ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling
+ ##
+ targetMemory: ""
+ ## ServiceAccount configuration
+ ##
+ serviceAccount:
+ ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created
+ ##
+ create: false
+ ## @param replica.serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
+ ##
+ automountServiceAccountToken: true
+ ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount
+ ##
+ annotations: {}
+## @section Redis® Sentinel configuration parameters
+##
+
+sentinel:
+ ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods.
+ ## IMPORTANT: this will disable the master and replicas services and
+ ## create a single Redis® service exposing both the Redis and Sentinel ports
+ ##
+ enabled: false
+ ## Bitnami Redis® Sentinel image version
+ ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
+ ## @param sentinel.image.registry Redis® Sentinel image registry
+ ## @param sentinel.image.repository Redis® Sentinel image repository
+ ## @param sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended)
+ ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy
+ ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets
+ ## @param sentinel.image.debug Enable image debug mode
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/redis-sentinel
+ tag: 7.0.7-debian-11-r10
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Enable debug mode
+ ##
+ debug: false
+ ## @param sentinel.masterSet Master set name
+ ##
+ masterSet: mymaster
+ ## @param sentinel.quorum Sentinel Quorum
+ ##
+ quorum: 2
+ ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out.
+ ## NOTE: This is directly related to the startupProbes which are configured to run every 10 seconds for a total of 22 failures. If adjusting this value, also adjust the startupProbes.
+ getMasterTimeout: 220
+ ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it.
+ ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data.
+ ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000.
+ ##
+ automateClusterRecovery: false
+ ## @param sentinel.redisShutdownWaitFailover Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container).
+ redisShutdownWaitFailover: true
+ ## Sentinel timing restrictions
+ ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down
+  ## @param sentinel.failoverTimeout Timeout for performing an election failover
+ ##
+ downAfterMilliseconds: 60000
+ failoverTimeout: 180000
+ ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover
+ ##
+ parallelSyncs: 1
+ ## @param sentinel.configuration Configuration for Redis® Sentinel nodes
+ ## ref: https://redis.io/topics/sentinel
+ ##
+ configuration: ""
+ ## @param sentinel.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param sentinel.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel
+ ##
+ preExecCmds: []
+ ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param sentinel.externalMaster.enabled Use external master for bootstrapping
+ ## @param sentinel.externalMaster.host External master host to bootstrap from
+ ## @param sentinel.externalMaster.port Port for Redis service external master host
+ ##
+ externalMaster:
+ enabled: false
+ host: ""
+ port: 6379
+ ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes
+ ##
+ containerPorts:
+ sentinel: 26379
+ ## Configure extra options for Redis® containers' liveness and readiness probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes
+ ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: true
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 22
+ ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes
+ ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes
+ ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## Persistence parameters
+ ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ persistence:
+ ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental)
+ ##
+ enabled: false
+ ## @param sentinel.persistence.storageClass Persistent Volume storage class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
+ ##
+ storageClass: ""
+ ## @param sentinel.persistence.accessModes Persistent Volume access modes
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param sentinel.persistence.size Persistent Volume size
+ ##
+ size: 100Mi
+ ## @param sentinel.persistence.annotations Additional custom annotations for the PVC
+ ##
+ annotations: {}
+ ## @param sentinel.persistence.selector Additional labels to match for the PVC
+ ## e.g:
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param sentinel.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+ ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes.
+ ##
+ medium: ""
+ ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
+ ##
+ sizeLimit: ""
+ ## Redis® Sentinel resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers
+ ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context
+ ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel
+ ##
+ extraVolumes: []
+ ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s)
+ ##
+ extraVolumeMounts: []
+ ## Redis® Sentinel service parameters
+ ##
+ service:
+ ## @param sentinel.service.type Redis® Sentinel service type
+ ##
+ type: ClusterIP
+ ## @param sentinel.service.ports.redis Redis® service port for Redis®
+ ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel
+ ##
+ ports:
+ redis: 6379
+ sentinel: 26379
+ ## @param sentinel.service.nodePorts.redis Node port for Redis®
+ ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ## NOTE: choose port between <30000-32767>
+ ## NOTE: By leaving these values blank, they will be generated by ports-configmap
+ ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port
+ ##
+ nodePorts:
+ redis: ""
+ sentinel: ""
+ ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
+ ##
+ extraPorts: []
+ ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP
+ ##
+ clusterIP: ""
+ ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g.
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service
+ ##
+ annotations: {}
+ ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods
+ ##
+ terminationGracePeriodSeconds: 30
+
+## @section Other Parameters
+##
+
+## Network Policy configuration
+## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+ ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources
+ ##
+ enabled: false
+ ## @param networkPolicy.allowExternal Don't require client label for connections
+ ## When set to false, only pods with the correct client label will have network access to the ports
+ ## Redis® is listening on. When true, Redis® will accept connections from any source
+ ## (with the correct destination port).
+ ##
+ allowExternal: true
+ ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy
+ ## e.g:
+ ## extraIngress:
+ ## - ports:
+ ## - port: 1234
+ ## from:
+ ## - podSelector:
+ ## - matchLabels:
+ ## - role: frontend
+ ## - podSelector:
+ ## - matchExpressions:
+ ## - key: role
+ ## operator: In
+ ## values:
+ ## - frontend
+ ##
+ extraIngress: []
+ ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy
+ ## e.g:
+ ## extraEgress:
+ ## - ports:
+ ## - port: 1234
+ ## to:
+ ## - podSelector:
+ ## - matchLabels:
+ ## - role: frontend
+ ## - podSelector:
+ ## - matchExpressions:
+ ## - key: role
+ ## operator: In
+ ## values:
+ ## - frontend
+ ##
+ extraEgress: []
+ ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces
+ ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces
+ ##
+ ingressNSMatchLabels: {}
+ ingressNSPodMatchLabels: {}
+## PodSecurityPolicy configuration
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+##
+podSecurityPolicy:
+ ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
+ ##
+ create: false
+ ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules
+ ##
+ enabled: false
+## RBAC configuration
+##
+rbac:
+ ## @param rbac.create Specifies whether RBAC resources should be created
+ ##
+ create: false
+ ## @param rbac.rules Custom RBAC rules to set
+ ## e.g:
+ ## rules:
+ ## - apiGroups:
+ ## - ""
+ ## resources:
+ ## - pods
+ ## verbs:
+ ## - get
+ ## - list
+ ##
+ rules: []
+## ServiceAccount configuration
+##
+serviceAccount:
+ ## @param serviceAccount.create Specifies whether a ServiceAccount should be created
+ ##
+ create: true
+ ## @param serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
+ ##
+ automountServiceAccountToken: true
+ ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+ ##
+ annotations: {}
+## Redis® Pod Disruption Budget configuration
+## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+##
+pdb:
+ ## @param pdb.create Specifies whether a PodDisruptionBudget should be created
+ ##
+ create: false
+ ## @param pdb.minAvailable Min number of pods that must still be available after the eviction
+ ##
+ minAvailable: 1
+ ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction
+ ##
+ maxUnavailable: ""
+## TLS configuration
+##
+tls:
+ ## @param tls.enabled Enable TLS traffic
+ ##
+ enabled: false
+ ## @param tls.authClients Require clients to authenticate
+ ##
+ authClients: true
+ ## @param tls.autoGenerated Enable autogenerated certificates
+ ##
+ autoGenerated: false
+ ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates
+ ##
+ existingSecret: ""
+ ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead.
+ ##
+ certificatesSecret: ""
+ ## @param tls.certFilename Certificate filename
+ ##
+ certFilename: ""
+ ## @param tls.certKeyFilename Certificate Key filename
+ ##
+ certKeyFilename: ""
+ ## @param tls.certCAFilename CA Certificate filename
+ ##
+ certCAFilename: ""
+ ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers)
+ ##
+ dhParamsFilename: ""
+
+## @section Metrics Parameters
+##
+
+metrics:
+ ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics
+ ##
+ enabled: false
+ ## Bitnami Redis® Exporter image
+ ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/
+ ## @param metrics.image.registry Redis® Exporter image registry
+ ## @param metrics.image.repository Redis® Exporter image repository
+ ## @param metrics.image.tag Redis® Exporter image tag (immutable tags are recommended)
+ ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param metrics.image.pullPolicy Redis® Exporter image pull policy
+ ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/redis-exporter
+ tag: 1.45.0-debian-11-r26
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Configure extra options for Redis® containers' liveness, readiness & startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+ ## @param metrics.startupProbe.enabled Enable startupProbe on Redis® replicas nodes
+ ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param metrics.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes
+ ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param metrics.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes
+ ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+ ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param metrics.command Override default metrics container init command (useful when using custom images)
+ ##
+ command: []
+ ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname
+ ## Useful for certificate CN/SAN matching
+ ##
+ redisTargetHost: "localhost"
+ ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example:
+ ## e.g.:
+ ## extraArgs:
+ ## check-keys: myKey,myOtherKey
+ ##
+ extraArgs: {}
+ ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context
+ ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar
+ ##
+ extraVolumes: []
+ ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar
+ ##
+ extraVolumeMounts: []
+ ## Redis® exporter resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param metrics.resources.limits The resources limits for the Redis® exporter container
+ ## @param metrics.resources.requests The requested resources for the Redis® exporter container
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## @param metrics.podLabels Extra labels for Redis® exporter pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ ##
+ podLabels: {}
+ ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9121"
+ ## Redis® exporter service parameters
+ ##
+ service:
+ ## @param metrics.service.type Redis® exporter service type
+ ##
+ type: ClusterIP
+ ## @param metrics.service.port Redis® exporter service port
+ ##
+ port: 9121
+ ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
+ ##
+ extraPorts: []
+ ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g.
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service
+ ##
+ annotations: {}
+ ## Prometheus Service Monitor
+ ## ref: https://github.com/coreos/prometheus-operator
+ ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ serviceMonitor:
+ ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator
+ ##
+ enabled: false
+ ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created
+ ##
+ namespace: ""
+ ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped
+ ##
+ interval: 30s
+ ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended
+ ##
+ scrapeTimeout: ""
+ ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping.
+ ##
+ relabellings: []
+ ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion.
+ ##
+ metricRelabelings: []
+ ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus
+ ##
+ additionalLabels: {}
+ ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics
+ ##
+ podTargetLabels: []
+ ## Custom PrometheusRule to be defined
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ ##
+ prometheusRule:
+ ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator
+ ##
+ enabled: false
+ ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created
+ ##
+ namespace: ""
+ ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule
+ ##
+ additionalLabels: {}
+ ## @param metrics.prometheusRule.rules Custom Prometheus rules
+ ## e.g:
+ ## rules:
+ ## - alert: RedisDown
+ ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0
+ ## for: 2m
+ ## labels:
+ ## severity: error
+ ## annotations:
+ ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down
+ ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down
+ ## - alert: RedisMemoryHigh
+ ## expr: >
+ ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100
+ ## /
+ ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"}
+ ## > 90
+ ## for: 2m
+ ## labels:
+ ## severity: error
+ ## annotations:
+ ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory
+ ## description: |
+ ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
+ ## - alert: RedisKeyEviction
+ ## expr: |
+ ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0
+ ## for: 1s
+ ## labels:
+ ## severity: error
+ ## annotations:
+ ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys
+ ## description: |
+ ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
+ ##
+ rules: []
+
+## @section Init Container Parameters
+##
+
+## 'volumePermissions' init container parameters
+## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
+## based on the *podSecurityContext/*containerSecurityContext parameters
+##
+volumePermissions:
+ ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
+ ##
+ enabled: false
+ ## Bitnami Shell image
+ ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/
+ ## @param volumePermissions.image.registry Bitnami Shell image registry
+ ## @param volumePermissions.image.repository Bitnami Shell image repository
+ ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended)
+ ## @param volumePermissions.image.digest Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy
+ ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: 11-debian-11-r72
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Init container's resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param volumePermissions.resources.limits The resources limits for the init container
+ ## @param volumePermissions.resources.requests The requested resources for the init container
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Init container Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser
+ ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
+ ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+ ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
+ ##
+ containerSecurityContext:
+ runAsUser: 0
+
+## init-sysctl container parameters
+## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings)
+##
+sysctl:
+ ## @param sysctl.enabled Enable init container to modify Kernel settings
+ ##
+ enabled: false
+ ## Bitnami Shell image
+ ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/
+ ## @param sysctl.image.registry Bitnami Shell image registry
+ ## @param sysctl.image.repository Bitnami Shell image repository
+ ## @param sysctl.image.tag Bitnami Shell image tag (immutable tags are recommended)
+ ## @param sysctl.image.digest Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param sysctl.image.pullPolicy Bitnami Shell image pull policy
+ ## @param sysctl.image.pullSecrets Bitnami Shell image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: 11-debian-11-r72
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## @param sysctl.command Override default init-sysctl container command (useful when using custom images)
+ ##
+ command: []
+ ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys`
+ ##
+ mountHostSys: false
+ ## Init container's resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param sysctl.resources.limits The resources limits for the init container
+ ## @param sysctl.resources.requests The requested resources for the init container
+ ##
+ resources:
+ limits: {}
+ requests: {}
+
+## @section useExternalDNS Parameters
+##
+## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable.
+## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled.
+## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations.
+## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release.
+##
+useExternalDNS:
+ enabled: false
+ suffix: ""
+ annotationKey: external-dns.alpha.kubernetes.io/
+ additionalAnnotations: {}
diff --git a/charts/penpot/templates/_helpers.tpl b/charts/penpot/templates/_helpers.tpl
new file mode 100644
index 0000000..0cf66bf
--- /dev/null
+++ b/charts/penpot/templates/_helpers.tpl
@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "penpot.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "penpot.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "penpot.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels.
+*/}}
+{{- define "penpot.labels" -}}
+helm.sh/chart: {{ include "penpot.chart" . }}
+app.kubernetes.io/name: {{ include "penpot.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels.
+*/}}
+{{- define "penpot.frontendSelectorLabels" -}}
+app.kubernetes.io/name: {{ include "penpot.name" . }}-frontend
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+{{- define "penpot.backendSelectorLabels" -}}
+app.kubernetes.io/name: {{ include "penpot.name" . }}-backend
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+{{- define "penpot.exporterSelectorLabels" -}}
+app.kubernetes.io/name: {{ include "penpot.name" . }}-exporter
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use.
+*/}}
+{{- define "penpot.serviceAccountName" -}}
+{{- if .Values.serviceAccount.enabled -}}
+ {{ default (include "penpot.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/penpot/templates/backend/deployment.yaml b/charts/penpot/templates/backend/deployment.yaml
new file mode 100644
index 0000000..064123d
--- /dev/null
+++ b/charts/penpot/templates/backend/deployment.yaml
@@ -0,0 +1,375 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "penpot.fullname" . }}-backend
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.backend.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "penpot.backendSelectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "penpot.backendSelectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+      {{- if .Values.backend.podSecurityContext.enabled }}
+ securityContext:
+ {{- omit .Values.backend.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "penpot.serviceAccountName" . }}
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/instance
+ operator: In
+ values:
+ - {{ .Release.Name }}
+ topologyKey: "kubernetes.io/hostname"
+ containers:
+ - name: {{ .Chart.Name }}-backend
+          {{- if .Values.backend.containerSecurityContext.enabled }}
+ securityContext:
+ {{- omit .Values.backend.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}"
+ imagePullPolicy: {{ .Values.backend.image.imagePullPolicy }}
+ volumeMounts:
+ - mountPath: /opt/data
+ name: app-data
+ readOnly: false
+ env:
+ - name: PENPOT_PUBLIC_URI
+ value: {{ .Values.config.publicURI | quote }}
+ - name: PENPOT_FLAGS
+ value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
+ - name: PENPOT_SECRET_KEY
+ value: {{ .Values.config.apiSecretKey | quote }}
+ - name: PENPOT_DATABASE_URI
+ value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
+ - name: PENPOT_DATABASE_USERNAME
+ {{- if not .Values.config.postgresql.secretKeys.usernameKey }}
+ value: {{ .Values.config.postgresql.username | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.postgresql.existingSecret }}
+ key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
+ {{- end }}
+ - name: PENPOT_DATABASE_PASSWORD
+ {{- if not .Values.config.postgresql.secretKeys.passwordKey }}
+ value: {{ .Values.config.postgresql.password | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.postgresql.existingSecret }}
+ key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
+ {{- end }}
+ - name: PENPOT_REDIS_URI
+ value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
+ - name: PENPOT_ASSETS_STORAGE_BACKEND
+ value: {{ .Values.config.assets.storageBackend | quote }}
+ {{- if eq .Values.config.assets.storageBackend "assets-fs" }}
+ - name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
+ value: {{ .Values.config.assets.filesystem.directory | quote }}
+ {{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
+ - name: PENPOT_STORAGE_ASSETS_S3_REGION
+ value: {{ .Values.config.assets.s3.region | quote }}
+ - name: PENPOT_STORAGE_ASSETS_S3_BUCKET
+ value: {{ .Values.config.assets.s3.bucket | quote }}
+ - name: AWS_ACCESS_KEY_ID
+ {{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
+ value: {{ .Values.config.assets.s3.accessKeyID | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
+ {{- end }}
+ - name: AWS_SECRET_ACCESS_KEY
+ {{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
+ value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
+ {{- end }}
+ - name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
+ {{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
+ value: {{ .Values.config.assets.s3.endpointURI | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
+ {{- end }}
+ {{- end }}
+ - name: PENPOT_TELEMETRY_ENABLED
+ value: {{ .Values.config.telemetryEnabled | quote }}
+
+ {{- if .Values.config.smtp.enabled }}
+ {{- if .Values.config.smtp.defaultFrom }}
+ - name: PENPOT_SMTP_DEFAULT_FROM
+ value: {{ .Values.config.smtp.defaultFrom | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.defaultReplyTo }}
+ - name: PENPOT_SMTP_DEFAULT_REPLY_TO
+ value: {{ .Values.config.smtp.defaultReplyTo | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.host }}
+ - name: PENPOT_SMTP_HOST
+ value: {{ .Values.config.smtp.host | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.port }}
+ - name: PENPOT_SMTP_PORT
+ value: {{ .Values.config.smtp.port | quote }}
+ {{- end }}
+ {{- if not .Values.config.smtp.secretKeys.usernameKey }}
+ - name: PENPOT_SMTP_USERNAME
+ value: {{ .Values.config.smtp.username | quote }}
+ {{- else }}
+ - name: PENPOT_SMTP_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.smtp.existingSecret }}
+ key: {{ .Values.config.smtp.secretKeys.usernameKey }}
+ {{- end }}
+ {{- if not .Values.config.smtp.secretKeys.passwordKey }}
+ - name: PENPOT_SMTP_PASSWORD
+ value: {{ .Values.config.smtp.password | quote }}
+ {{- else }}
+ - name: PENPOT_SMTP_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.smtp.existingSecret }}
+ key: {{ .Values.config.smtp.secretKeys.passwordKey }}
+ {{- end }}
+ {{- if .Values.config.smtp.tls }}
+ - name: PENPOT_SMTP_TLS
+ value: {{ .Values.config.smtp.tls | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.ssl }}
+ - name: PENPOT_SMTP_SSL
+ value: {{ .Values.config.smtp.ssl | quote }}
+ {{- end }}
+ {{- end }}
+
+
+ {{- if .Values.config.registrationDomainWhitelist }}
+ - name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
+ value: {{ .Values.config.registrationDomainWhitelist | quote }}
+ {{- end }}
+
+ {{- if .Values.config.providers.google.enabled }}
+ {{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
+ - name: PENPOT_GOOGLE_CLIENT_ID
+ value: {{ .Values.config.providers.google.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GOOGLE_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
+ {{- end }}
+          {{- if not .Values.config.providers.secretKeys.googleClientSecretKey }}
+ - name: PENPOT_GOOGLE_CLIENT_SECRET
+ value: {{ .Values.config.providers.google.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GOOGLE_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.github.enabled }}
+ {{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
+ - name: PENPOT_GITHUB_CLIENT_ID
+ value: {{ .Values.config.providers.github.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GITHUB_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
+ - name: PENPOT_GITHUB_CLIENT_SECRET
+ value: {{ .Values.config.providers.github.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GITHUB_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.gitlab.enabled }}
+ {{- if .Values.config.providers.gitlab.baseURI }}
+ - name: PENPOT_GITLAB_BASE_URI
+ value: {{ .Values.config.providers.gitlab.baseURI | quote }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
+ - name: PENPOT_GITLAB_CLIENT_ID
+ value: {{ .Values.config.providers.gitlab.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GITLAB_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
+ - name: PENPOT_GITLAB_CLIENT_SECRET
+ value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GITLAB_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.oidc.enabled }}
+ {{- if .Values.config.providers.oidc.baseURI }}
+ - name: PENPOT_OIDC_BASE_URI
+ value: {{ .Values.config.providers.oidc.baseURI | quote }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
+ - name: PENPOT_OIDC_CLIENT_ID
+            value: {{ .Values.config.providers.oidc.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_OIDC_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
+ {{- end }}
+          {{- if not .Values.config.providers.secretKeys.oidcClientSecretKey }}
+ - name: PENPOT_OIDC_CLIENT_SECRET
+ value: {{ .Values.config.providers.oidc.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_OIDC_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.authURI }}
+ - name: PENPOT_OIDC_AUTH_URI
+ value: {{ .Values.config.providers.oidc.authURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.tokenURI }}
+ - name: PENPOT_OIDC_TOKEN_URI
+ value: {{ .Values.config.providers.oidc.tokenURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.userURI }}
+ - name: PENPOT_OIDC_USER_URI
+ value: {{ .Values.config.providers.oidc.userURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.roles }}
+ - name: PENPOT_OIDC_ROLES
+ value: {{ .Values.config.providers.oidc.roles | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.rolesAttribute }}
+ - name: PENPOT_OIDC_ROLES_ATTR
+ value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.scopes }}
+ - name: PENPOT_OIDC_SCOPES
+ value: {{ .Values.config.providers.oidc.scopes | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.nameAttribute }}
+ - name: PENPOT_OIDC_NAME_ATTR
+ value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.emailAttribute }}
+ - name: PENPOT_OIDC_EMAIL_ATTR
+ value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.ldap.enabled }}
+ {{- if .Values.config.providers.ldap.host }}
+ - name: PENPOT_LDAP_HOST
+ value: {{ .Values.config.providers.ldap.host | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.port }}
+ - name: PENPOT_LDAP_PORT
+ value: {{ .Values.config.providers.ldap.port | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.ssl }}
+ - name: PENPOT_LDAP_SSL
+ value: {{ .Values.config.providers.ldap.ssl | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.startTLS }}
+ - name: PENPOT_LDAP_STARTTLS
+ value: {{ .Values.config.providers.ldap.startTLS | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.baseDN }}
+ - name: PENPOT_LDAP_BASE_DN
+ value: {{ .Values.config.providers.ldap.baseDN | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.bindDN }}
+ - name: PENPOT_LDAP_BIND_DN
+ value: {{ .Values.config.providers.ldap.bindDN | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.bindPassword }}
+ - name: PENPOT_LDAP_BIND_PASSWORD
+ value: {{ .Values.config.providers.ldap.bindPassword | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesUsername }}
+ - name: PENPOT_LDAP_ATTRS_USERNAME
+ value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesEmail }}
+ - name: PENPOT_LDAP_ATTRS_EMAIL
+ value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesFullname }}
+ - name: PENPOT_LDAP_ATTRS_FULLNAME
+ value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesPhoto }}
+ - name: PENPOT_LDAP_ATTRS_PHOTO
+ value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.backend.service.port }}
+ protocol: TCP
+ resources:
+ {{- toYaml .Values.backend.resources | nindent 12 }}
+ {{- with .Values.backend.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.backend.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.backend.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: app-data
+ {{- if .Values.persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistence.existingClaim | default ( include "penpot.fullname" . ) }}
+ {{- else }}
+ emptyDir: {}
+        {{- end }}
diff --git a/charts/penpot/templates/backend/service.yaml b/charts/penpot/templates/backend/service.yaml
new file mode 100644
index 0000000..b9668d1
--- /dev/null
+++ b/charts/penpot/templates/backend/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "penpot.fullname" . }}-backend
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.backend.service.type }}
+ ports:
+ - port: {{ .Values.backend.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "penpot.backendSelectorLabels" . | nindent 4 }}
diff --git a/charts/penpot/templates/exporter/deployment.yaml b/charts/penpot/templates/exporter/deployment.yaml
new file mode 100644
index 0000000..dccbdf0
--- /dev/null
+++ b/charts/penpot/templates/exporter/deployment.yaml
@@ -0,0 +1,353 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "penpot.fullname" . }}-exporter
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.exporter.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "penpot.exporterSelectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "penpot.exporterSelectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "penpot.serviceAccountName" . }}
+      {{- if .Values.exporter.podSecurityContext.enabled }}
+ securityContext:
+ {{- omit .Values.exporter.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}-exporter
+          {{- if .Values.exporter.containerSecurityContext.enabled }}
+ securityContext:
+ {{- omit .Values.exporter.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag }}"
+ imagePullPolicy: {{ .Values.exporter.image.imagePullPolicy }}
+ env:
+ - name: PENPOT_PUBLIC_URI
+ value: {{ .Values.config.publicURI | quote }}
+ - name: PENPOT_FLAGS
+ value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
+ - name: PENPOT_SECRET_KEY
+ value: {{ .Values.config.apiSecretKey | quote }}
+ - name: PENPOT_DATABASE_URI
+ value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
+ - name: PENPOT_DATABASE_USERNAME
+ {{- if not .Values.config.postgresql.secretKeys.usernameKey }}
+ value: {{ .Values.config.postgresql.username | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.postgresql.existingSecret }}
+ key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
+ {{- end }}
+ - name: PENPOT_DATABASE_PASSWORD
+ {{- if not .Values.config.postgresql.secretKeys.passwordKey }}
+ value: {{ .Values.config.postgresql.password | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.postgresql.existingSecret }}
+ key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
+ {{- end }}
+ - name: PENPOT_REDIS_URI
+ value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
+ - name: PENPOT_ASSETS_STORAGE_BACKEND
+ value: {{ .Values.config.assets.storageBackend | quote }}
+ {{- if eq .Values.config.assets.storageBackend "assets-fs" }}
+ - name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
+ value: {{ .Values.config.assets.filesystem.directory | quote }}
+ {{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
+ - name: PENPOT_STORAGE_ASSETS_S3_REGION
+ value: {{ .Values.config.assets.s3.region | quote }}
+ - name: PENPOT_STORAGE_ASSETS_S3_BUCKET
+ value: {{ .Values.config.assets.s3.bucket | quote }}
+ - name: AWS_ACCESS_KEY_ID
+ {{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
+ value: {{ .Values.config.assets.s3.accessKeyID | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
+ {{- end }}
+ - name: AWS_SECRET_ACCESS_KEY
+ {{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
+ value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
+ {{- end }}
+ - name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
+ {{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
+ value: {{ .Values.config.assets.s3.endpointURI | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
+ {{- end }}
+ {{- end }}
+ - name: PENPOT_TELEMETRY_ENABLED
+ value: {{ .Values.config.telemetryEnabled | quote }}
+
+ {{- if .Values.config.smtp.enabled }}
+ {{- if .Values.config.smtp.defaultFrom }}
+ - name: PENPOT_SMTP_DEFAULT_FROM
+ value: {{ .Values.config.smtp.defaultFrom | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.defaultReplyTo }}
+ - name: PENPOT_SMTP_DEFAULT_REPLY_TO
+ value: {{ .Values.config.smtp.defaultReplyTo | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.host }}
+ - name: PENPOT_SMTP_HOST
+ value: {{ .Values.config.smtp.host | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.port }}
+ - name: PENPOT_SMTP_PORT
+ value: {{ .Values.config.smtp.port | quote }}
+ {{- end }}
+ {{- if not .Values.config.smtp.secretKeys.usernameKey }}
+ - name: PENPOT_SMTP_USERNAME
+ value: {{ .Values.config.smtp.username | quote }}
+ {{- else }}
+ - name: PENPOT_SMTP_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.smtp.existingSecret }}
+ key: {{ .Values.config.smtp.secretKeys.usernameKey }}
+ {{- end }}
+ {{- if not .Values.config.smtp.secretKeys.passwordKey }}
+ - name: PENPOT_SMTP_PASSWORD
+ value: {{ .Values.config.smtp.password | quote }}
+ {{- else }}
+ - name: PENPOT_SMTP_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.smtp.existingSecret }}
+ key: {{ .Values.config.smtp.secretKeys.passwordKey }}
+ {{- end }}
+ {{- if .Values.config.smtp.tls }}
+ - name: PENPOT_SMTP_TLS
+ value: {{ .Values.config.smtp.tls | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.ssl }}
+ - name: PENPOT_SMTP_SSL
+ value: {{ .Values.config.smtp.ssl | quote }}
+ {{- end }}
+ {{- end }}
+
+
+ {{- if .Values.config.registrationDomainWhitelist }}
+ - name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
+ value: {{ .Values.config.registrationDomainWhitelist | quote }}
+ {{- end }}
+
+ {{- if .Values.config.providers.google.enabled }}
+ {{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
+ - name: PENPOT_GOOGLE_CLIENT_ID
+ value: {{ .Values.config.providers.google.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GOOGLE_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
+ {{- end }}
+          {{- if not .Values.config.providers.secretKeys.googleClientSecretKey }}
+ - name: PENPOT_GOOGLE_CLIENT_SECRET
+ value: {{ .Values.config.providers.google.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GOOGLE_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.github.enabled }}
+ {{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
+ - name: PENPOT_GITHUB_CLIENT_ID
+ value: {{ .Values.config.providers.github.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GITHUB_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
+ - name: PENPOT_GITHUB_CLIENT_SECRET
+ value: {{ .Values.config.providers.github.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GITHUB_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.gitlab.enabled }}
+ {{- if .Values.config.providers.gitlab.baseURI }}
+ - name: PENPOT_GITLAB_BASE_URI
+ value: {{ .Values.config.providers.gitlab.baseURI | quote }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
+ - name: PENPOT_GITLAB_CLIENT_ID
+ value: {{ .Values.config.providers.gitlab.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GITLAB_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
+ - name: PENPOT_GITLAB_CLIENT_SECRET
+ value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GITLAB_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.oidc.enabled }}
+ {{- if .Values.config.providers.oidc.baseURI }}
+ - name: PENPOT_OIDC_BASE_URI
+ value: {{ .Values.config.providers.oidc.baseURI | quote }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
+ - name: PENPOT_OIDC_CLIENT_ID
+            value: {{ .Values.config.providers.oidc.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_OIDC_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
+ {{- end }}
+          {{- if not .Values.config.providers.secretKeys.oidcClientSecretKey }}
+ - name: PENPOT_OIDC_CLIENT_SECRET
+ value: {{ .Values.config.providers.oidc.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_OIDC_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.authURI }}
+ - name: PENPOT_OIDC_AUTH_URI
+ value: {{ .Values.config.providers.oidc.authURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.tokenURI }}
+ - name: PENPOT_OIDC_TOKEN_URI
+ value: {{ .Values.config.providers.oidc.tokenURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.userURI }}
+ - name: PENPOT_OIDC_USER_URI
+ value: {{ .Values.config.providers.oidc.userURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.roles }}
+ - name: PENPOT_OIDC_ROLES
+ value: {{ .Values.config.providers.oidc.roles | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.rolesAttribute }}
+ - name: PENPOT_OIDC_ROLES_ATTR
+ value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.scopes }}
+ - name: PENPOT_OIDC_SCOPES
+ value: {{ .Values.config.providers.oidc.scopes | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.nameAttribute }}
+ - name: PENPOT_OIDC_NAME_ATTR
+ value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.emailAttribute }}
+ - name: PENPOT_OIDC_EMAIL_ATTR
+ value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.ldap.enabled }}
+ {{- if .Values.config.providers.ldap.host }}
+ - name: PENPOT_LDAP_HOST
+ value: {{ .Values.config.providers.ldap.host | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.port }}
+ - name: PENPOT_LDAP_PORT
+ value: {{ .Values.config.providers.ldap.port | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.ssl }}
+ - name: PENPOT_LDAP_SSL
+ value: {{ .Values.config.providers.ldap.ssl | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.startTLS }}
+ - name: PENPOT_LDAP_STARTTLS
+ value: {{ .Values.config.providers.ldap.startTLS | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.baseDN }}
+ - name: PENPOT_LDAP_BASE_DN
+ value: {{ .Values.config.providers.ldap.baseDN | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.bindDN }}
+ - name: PENPOT_LDAP_BIND_DN
+ value: {{ .Values.config.providers.ldap.bindDN | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.bindPassword }}
+ - name: PENPOT_LDAP_BIND_PASSWORD
+ value: {{ .Values.config.providers.ldap.bindPassword | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesUsername }}
+ - name: PENPOT_LDAP_ATTRS_USERNAME
+ value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesEmail }}
+ - name: PENPOT_LDAP_ATTRS_EMAIL
+ value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesFullname }}
+ - name: PENPOT_LDAP_ATTRS_FULLNAME
+ value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesPhoto }}
+ - name: PENPOT_LDAP_ATTRS_PHOTO
+ value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.exporter.service.port }}
+ protocol: TCP
+ resources:
+ {{- toYaml .Values.exporter.resources | nindent 12 }}
+ {{- with .Values.exporter.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.exporter.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.exporter.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/charts/penpot/templates/exporter/service.yaml b/charts/penpot/templates/exporter/service.yaml
new file mode 100644
index 0000000..75985e8
--- /dev/null
+++ b/charts/penpot/templates/exporter/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "penpot.fullname" . }}-exporter
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.exporter.service.type }}
+ ports:
+ - port: {{ .Values.exporter.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "penpot.exporterSelectorLabels" . | nindent 4 }}
diff --git a/charts/penpot/templates/frontend/configmap.yaml b/charts/penpot/templates/frontend/configmap.yaml
new file mode 100644
index 0000000..b31698d
--- /dev/null
+++ b/charts/penpot/templates/frontend/configmap.yaml
@@ -0,0 +1,129 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "{{ include "penpot.fullname" . }}-frontend-nginx"
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+data:
+ nginx.conf: |
+ user www-data;
+ worker_processes auto;
+ pid /run/nginx.pid;
+ include /etc/nginx/modules-enabled/*.conf;
+
+ events {
+ worker_connections 2048;
+ # multi_accept on;
+ }
+
+ http {
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ keepalive_requests 30;
+ keepalive_timeout 65;
+ types_hash_max_size 2048;
+
+ server_tokens off;
+
+ reset_timedout_connection on;
+ client_body_timeout 30s;
+ client_header_timeout 30s;
+
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ error_log /dev/stdout;
+ access_log /dev/stdout;
+
+ gzip on;
+ gzip_vary on;
+ gzip_proxied any;
+ gzip_static on;
+ gzip_comp_level 4;
+ gzip_buffers 16 8k;
+ gzip_http_version 1.1;
+
+ gzip_types text/plain text/css text/javascript application/javascript application/json application/transit+json;
+
+      resolver 127.0.0.11; # NOTE(review): this is Docker's embedded DNS and does not exist in Kubernetes; @handle_redirect's `proxy_pass $redirect_uri` needs a working resolver — make this configurable (e.g. the cluster DNS ClusterIP)
+
+ map $http_upgrade $connection_upgrade {
+ default upgrade;
+ '' close;
+ }
+
+ server {
+ listen 80 default_server;
+ server_name _;
+
+ client_max_body_size 100M;
+ charset utf-8;
+
+ proxy_http_version 1.1;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Scheme $scheme;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+ etag off;
+ root /var/www/app/;
+
+ location ~* \.(js|css).*$ {
+ add_header Cache-Control "max-age=86400" always; # 24 hours
+ }
+
+ location ~* \.(html).*$ {
+ add_header Cache-Control "no-cache, max-age=0" always;
+ }
+
+ location /api/export {
+ proxy_pass http://{{ include "penpot.fullname" . }}-exporter:6061;
+ }
+
+ location /api {
+ proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/api;
+ }
+
+ location /ws/notifications {
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/ws/notifications;
+ }
+
+ location @handle_redirect {
+ set $redirect_uri "$upstream_http_location";
+ set $redirect_host "$upstream_http_x_host";
+ set $redirect_cache_control "$upstream_http_cache_control";
+
+ proxy_buffering off;
+
+ proxy_set_header Host "$redirect_host";
+ proxy_hide_header etag;
+ proxy_hide_header x-amz-id-2;
+ proxy_hide_header x-amz-request-id;
+ proxy_hide_header x-amz-meta-server-side-encryption;
+ proxy_hide_header x-amz-server-side-encryption;
+ proxy_pass $redirect_uri;
+
+ add_header x-internal-redirect "$redirect_uri";
+ add_header x-cache-control "$redirect_cache_control";
+ add_header cache-control "$redirect_cache_control";
+ }
+
+ location /assets {
+ proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/assets;
+ recursive_error_pages on;
+ proxy_intercept_errors on;
+ error_page 301 302 307 = @handle_redirect;
+ }
+
+ location /internal/assets {
+ internal;
+ alias /opt/data/assets;
+ add_header x-internal-redirect "$upstream_http_x_accel_redirect";
+ }
+ }
+ }
diff --git a/charts/penpot/templates/frontend/deployment.yaml b/charts/penpot/templates/frontend/deployment.yaml
new file mode 100644
index 0000000..f464a6f
--- /dev/null
+++ b/charts/penpot/templates/frontend/deployment.yaml
@@ -0,0 +1,375 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "penpot.fullname" . }}-frontend
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.frontend.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "penpot.frontendSelectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "penpot.frontendSelectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "penpot.serviceAccountName" . }}
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/instance
+ operator: In
+ values:
+ - {{ .Release.Name }}
+ topologyKey: "kubernetes.io/hostname"
+ containers:
+ - name: {{ .Chart.Name }}-frontend
+ image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}"
+ imagePullPolicy: {{ .Values.frontend.image.imagePullPolicy }}
+ env:
+ - name: PENPOT_PUBLIC_URI
+ value: {{ .Values.config.publicURI | quote }}
+ - name: PENPOT_FLAGS
+ value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
+ - name: PENPOT_SECRET_KEY
+ value: {{ .Values.config.apiSecretKey | quote }}
+ - name: PENPOT_DATABASE_URI
+ value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
+ - name: PENPOT_DATABASE_USERNAME
+ {{- if not .Values.config.postgresql.secretKeys.usernameKey }}
+ value: {{ .Values.config.postgresql.username | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.postgresql.existingSecret }}
+ key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
+ {{- end }}
+ - name: PENPOT_DATABASE_PASSWORD
+ {{- if not .Values.config.postgresql.secretKeys.passwordKey }}
+ value: {{ .Values.config.postgresql.password | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.postgresql.existingSecret }}
+ key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
+ {{- end }}
+ - name: PENPOT_REDIS_URI
+ value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
+ - name: PENPOT_ASSETS_STORAGE_BACKEND
+ value: {{ .Values.config.assets.storageBackend | quote }}
+ {{- if eq .Values.config.assets.storageBackend "assets-fs" }}
+ - name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
+ value: {{ .Values.config.assets.filesystem.directory | quote }}
+ {{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
+ - name: PENPOT_STORAGE_ASSETS_S3_REGION
+ value: {{ .Values.config.assets.s3.region | quote }}
+ - name: PENPOT_STORAGE_ASSETS_S3_BUCKET
+ value: {{ .Values.config.assets.s3.bucket | quote }}
+ - name: AWS_ACCESS_KEY_ID
+ {{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
+ value: {{ .Values.config.assets.s3.accessKeyID | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
+ {{- end }}
+ - name: AWS_SECRET_ACCESS_KEY
+ {{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
+ value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
+ {{- end }}
+ - name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
+ {{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
+ value: {{ .Values.config.assets.s3.endpointURI | quote }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.assets.s3.existingSecret }}
+ key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
+ {{- end }}
+ {{- end }}
+ - name: PENPOT_TELEMETRY_ENABLED
+ value: {{ .Values.config.telemetryEnabled | quote }}
+
+ {{- if .Values.config.smtp.enabled }}
+ {{- if .Values.config.smtp.defaultFrom }}
+ - name: PENPOT_SMTP_DEFAULT_FROM
+ value: {{ .Values.config.smtp.defaultFrom | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.defaultReplyTo }}
+ - name: PENPOT_SMTP_DEFAULT_REPLY_TO
+ value: {{ .Values.config.smtp.defaultReplyTo | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.host }}
+ - name: PENPOT_SMTP_HOST
+ value: {{ .Values.config.smtp.host | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.port }}
+ - name: PENPOT_SMTP_PORT
+ value: {{ .Values.config.smtp.port | quote }}
+ {{- end }}
+ {{- if not .Values.config.smtp.secretKeys.usernameKey }}
+ - name: PENPOT_SMTP_USERNAME
+ value: {{ .Values.config.smtp.username | quote }}
+ {{- else }}
+ - name: PENPOT_SMTP_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.smtp.existingSecret }}
+ key: {{ .Values.config.smtp.secretKeys.usernameKey }}
+ {{- end }}
+ {{- if not .Values.config.smtp.secretKeys.passwordKey }}
+ - name: PENPOT_SMTP_PASSWORD
+ value: {{ .Values.config.smtp.password | quote }}
+ {{- else }}
+ - name: PENPOT_SMTP_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.smtp.existingSecret }}
+ key: {{ .Values.config.smtp.secretKeys.passwordKey }}
+ {{- end }}
+ {{- if .Values.config.smtp.tls }}
+ - name: PENPOT_SMTP_TLS
+ value: {{ .Values.config.smtp.tls | quote }}
+ {{- end }}
+ {{- if .Values.config.smtp.ssl }}
+ - name: PENPOT_SMTP_SSL
+ value: {{ .Values.config.smtp.ssl | quote }}
+ {{- end }}
+ {{- end }}
+
+
+ {{- if .Values.config.registrationDomainWhitelist }}
+ - name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
+ value: {{ .Values.config.registrationDomainWhitelist | quote }}
+ {{- end }}
+
+ {{- if .Values.config.providers.google.enabled }}
+ {{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
+ - name: PENPOT_GOOGLE_CLIENT_ID
+ value: {{ .Values.config.providers.google.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GOOGLE_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.googleClientSecretKey}}
+ - name: PENPOT_GOOGLE_CLIENT_SECRET
+ value: {{ .Values.config.providers.google.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GOOGLE_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.github.enabled }}
+ {{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
+ - name: PENPOT_GITHUB_CLIENT_ID
+ value: {{ .Values.config.providers.github.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GITHUB_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
+ - name: PENPOT_GITHUB_CLIENT_SECRET
+ value: {{ .Values.config.providers.github.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GITHUB_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.gitlab.enabled }}
+ {{- if .Values.config.providers.gitlab.baseURI }}
+ - name: PENPOT_GITLAB_BASE_URI
+ value: {{ .Values.config.providers.gitlab.baseURI | quote }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
+ - name: PENPOT_GITLAB_CLIENT_ID
+ value: {{ .Values.config.providers.gitlab.clientID | quote }}
+ {{- else }}
+ - name: PENPOT_GITLAB_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
+ - name: PENPOT_GITLAB_CLIENT_SECRET
+ value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_GITLAB_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.oidc.enabled }}
+ {{- if .Values.config.providers.oidc.baseURI }}
+ - name: PENPOT_OIDC_BASE_URI
+ value: {{ .Values.config.providers.oidc.baseURI | quote }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
+ - name: PENPOT_OIDC_CLIENT_ID
+ value: {{ .Values.config.providers.oidc.clientID | quote}}
+ {{- else }}
+ - name: PENPOT_OIDC_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
+ {{- end }}
+ {{- if not .Values.config.providers.secretKeys.oidcClientSecretKey}}
+ - name: PENPOT_OIDC_CLIENT_SECRET
+ value: {{ .Values.config.providers.oidc.clientSecret | quote }}
+ {{- else }}
+ - name: PENPOT_OIDC_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.config.providers.existingSecret }}
+ key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.authURI }}
+ - name: PENPOT_OIDC_AUTH_URI
+ value: {{ .Values.config.providers.oidc.authURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.tokenURI }}
+ - name: PENPOT_OIDC_TOKEN_URI
+ value: {{ .Values.config.providers.oidc.tokenURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.userURI }}
+ - name: PENPOT_OIDC_USER_URI
+ value: {{ .Values.config.providers.oidc.userURI | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.roles }}
+ - name: PENPOT_OIDC_ROLES
+ value: {{ .Values.config.providers.oidc.roles | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.rolesAttribute }}
+ - name: PENPOT_OIDC_ROLES_ATTR
+ value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.scopes }}
+ - name: PENPOT_OIDC_SCOPES
+ value: {{ .Values.config.providers.oidc.scopes | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.nameAttribute }}
+ - name: PENPOT_OIDC_NAME_ATTR
+ value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.oidc.emailAttribute }}
+ - name: PENPOT_OIDC_EMAIL_ATTR
+ value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .Values.config.providers.ldap.enabled }}
+ {{- if .Values.config.providers.ldap.host }}
+ - name: PENPOT_LDAP_HOST
+ value: {{ .Values.config.providers.ldap.host | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.port }}
+ - name: PENPOT_LDAP_PORT
+ value: {{ .Values.config.providers.ldap.port | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.ssl }}
+ - name: PENPOT_LDAP_SSL
+ value: {{ .Values.config.providers.ldap.ssl | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.startTLS }}
+ - name: PENPOT_LDAP_STARTTLS
+ value: {{ .Values.config.providers.ldap.startTLS | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.baseDN }}
+ - name: PENPOT_LDAP_BASE_DN
+ value: {{ .Values.config.providers.ldap.baseDN | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.bindDN }}
+ - name: PENPOT_LDAP_BIND_DN
+ value: {{ .Values.config.providers.ldap.bindDN | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.bindPassword }}
+ - name: PENPOT_LDAP_BIND_PASSWORD
+ value: {{ .Values.config.providers.ldap.bindPassword | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesUsername }}
+ - name: PENPOT_LDAP_ATTRS_USERNAME
+ value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesEmail }}
+ - name: PENPOT_LDAP_ATTRS_EMAIL
+ value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesFullname }}
+ - name: PENPOT_LDAP_ATTRS_FULLNAME
+ value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
+ {{- end }}
+ {{- if .Values.config.providers.ldap.attributesPhoto }}
+ - name: PENPOT_LDAP_ATTRS_PHOTO
+ value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: /opt/data
+ name: app-data
+ readOnly: false
+ - mountPath: /etc/nginx/nginx.conf
+ name: "{{ include "penpot.fullname" . }}-frontend-nginx"
+ readOnly: true
+ subPath: nginx.conf
+ ports:
+ - name: http
+ containerPort: {{ .Values.frontend.service.port }}
+ protocol: TCP
+ resources:
+ {{- toYaml .Values.frontend.resources | nindent 12 }}
+ {{- with .Values.frontend.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.frontend.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.frontend.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: app-data
+ {{- if .Values.persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistence.existingClaim | default ( include "penpot.fullname" . ) }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - configMap:
+ defaultMode: 420
+ name: "{{ include "penpot.fullname" . }}-frontend-nginx"
+ name: "{{ include "penpot.fullname" . }}-frontend-nginx"
diff --git a/charts/penpot/templates/frontend/ingress.yaml b/charts/penpot/templates/frontend/ingress.yaml
new file mode 100644
index 0000000..4dbfa05
--- /dev/null
+++ b/charts/penpot/templates/frontend/ingress.yaml
@@ -0,0 +1,56 @@
+{{- if .Values.frontend.ingress.enabled -}}
+{{- $gitVersion := .Capabilities.KubeVersion.GitVersion -}}
+{{- $fullName := include "penpot.fullname" . -}}
+{{- $svcPort := .Values.frontend.service.port -}}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+ {{- with .Values.frontend.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+{{- if .Values.frontend.ingress.className }}
+ ingressClassName: {{ .Values.frontend.ingress.className }} # TODO(giolekva): contribute back
+{{- end }}
+{{- if .Values.frontend.ingress.tls }}
+ tls:
+ {{- range .Values.frontend.ingress.tls }} # TODO(giolekva): contribute back
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- end }}
+ rules:
+ {{- range .Values.frontend.ingress.hosts }}
+ - host: {{ . | quote }} # TODO(giolekva): contribute back
+ http:
+ paths:
+{{ if semverCompare ">=1.19-0" $gitVersion }}
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: {{ $fullName }}
+ port:
+ number: {{ $svcPort }}
+{{ else }}
+ - path: /
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: {{ $svcPort }}
+{{- end }}
+ {{- end }}
+{{- end }}
diff --git a/charts/penpot/templates/frontend/service.yaml b/charts/penpot/templates/frontend/service.yaml
new file mode 100644
index 0000000..2ceb04f
--- /dev/null
+++ b/charts/penpot/templates/frontend/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "penpot.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.frontend.service.type }}
+ ports:
+ - port: {{ .Values.frontend.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "penpot.frontendSelectorLabels" . | nindent 4 }}
diff --git a/charts/penpot/templates/pvc.yaml b/charts/penpot/templates/pvc.yaml
new file mode 100644
index 0000000..d093f36
--- /dev/null
+++ b/charts/penpot/templates/pvc.yaml
@@ -0,0 +1,24 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "penpot.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+{{- include "penpot.labels" . | nindent 4 }}
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ accessModes:
+ {{- range .Values.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end -}}
diff --git a/charts/penpot/templates/serviceaccount.yaml b/charts/penpot/templates/serviceaccount.yaml
new file mode 100644
index 0000000..8c317c4
--- /dev/null
+++ b/charts/penpot/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.serviceAccount.enabled -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "penpot.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "penpot.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/penpot/values.yaml b/charts/penpot/values.yaml
new file mode 100644
index 0000000..78dba0f
--- /dev/null
+++ b/charts/penpot/values.yaml
@@ -0,0 +1,486 @@
+## Default values for Penpot
+
+## @section Global parameters
+
+## @param global.postgresqlEnabled Whether to deploy the Bitnami PostgreSQL chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/postgresql) for configuration.
+## @param global.redisEnabled Whether to deploy the Bitnami Redis chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/redis) for configuration.
+## @param global.imagePullSecrets Global Docker registry secret names as an array.
+##
+global:
+ postgresqlEnabled: false
+ redisEnabled: false
+ ## E.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+
+## @section Common parameters
+
+## @param nameOverride String to partially override common.names.fullname
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname
+##
+fullnameOverride: ""
+## @param serviceAccount.enabled Specifies whether a ServiceAccount should be created.
+## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
+## @param serviceAccount.name The name of the ServiceAccount to use. If not set and enabled is true, a name is generated using the fullname template.
+##
+serviceAccount:
+ enabled: true
+ annotations: {}
+ name: ""
+
+## @section Backend parameters
+
+## Penpot Backend
+##
+backend:
+ ## @param backend.image.repository The Docker repository to pull the image from.
+ ## @param backend.image.tag The image tag to use.
+ ## @param backend.image.imagePullPolicy The image pull policy to use.
+ ##
+ image:
+ repository: penpotapp/backend
+ tag: 1.16.0-beta
+ imagePullPolicy: IfNotPresent
+ ## @param backend.replicaCount The number of replicas to deploy.
+ ##
+ replicaCount: 1
+ ## @param backend.service.type The service type to create.
+ ## @param backend.service.port The service port to use.
+ ##
+ service:
+ type: ClusterIP
+ port: 6060
+ ## Configure Pods Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param backend.podSecurityContext.enabled Enabled Penpot pods' security context
+ ## @param backend.podSecurityContext.fsGroup Set Penpot pod's security context fsGroup
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param backend.containerSecurityContext.enabled Enabled Penpot containers' security context
+ ## @param backend.containerSecurityContext.runAsUser Set Penpot containers' security context runAsUser
+ ## @param backend.containerSecurityContext.allowPrivilegeEscalation Set Penpot containers' security context allowPrivilegeEscalation
+ ## @param backend.containerSecurityContext.capabilities.drop Set Penpot containers' security context capabilities to be dropped
+ ## @param backend.containerSecurityContext.readOnlyRootFilesystem Set Penpot containers' security context readOnlyRootFilesystem
+ ## @param backend.containerSecurityContext.runAsNonRoot Set Penpot container's security context runAsNonRoot
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - all
+ readOnlyRootFilesystem: false
+ runAsNonRoot: true
+ ## @param backend.affinity Affinity for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ##
+ affinity: {}
+ ## @param backend.nodeSelector Node labels for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param backend.tolerations Tolerations for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## Penpot backend resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param backend.resources.limits The resources limits for the Penpot backend containers
+ ## @param backend.resources.requests The requested resources for the Penpot backend containers
+ ##
+ resources:
+ limits: {}
+ requests: {}
+
+## @section Frontend parameters
+
+## Penpot Frontend
+##
+frontend:
+ ## @param frontend.image.repository The Docker repository to pull the image from.
+ ## @param frontend.image.tag The image tag to use.
+ ## @param frontend.image.imagePullPolicy The image pull policy to use.
+ ##
+ image:
+ repository: penpotapp/frontend
+ tag: 1.16.0-beta
+ imagePullPolicy: IfNotPresent
+ ## @param frontend.replicaCount The number of replicas to deploy.
+ ##
+ replicaCount: 1
+ ## @param frontend.service.type The service type to create.
+ ## @param frontend.service.port The service port to use.
+ ##
+ service:
+ type: ClusterIP
+ port: 80
+ ## @param frontend.ingress.enabled Enable ingress record generation for Penpot frontend.
+ ## @param frontend.ingress.annotations Mapped annotations for the frontend ingress.
+ ## @param frontend.ingress.hosts Array style hosts for the frontend ingress.
+ ## @param frontend.ingress.tls Array style TLS secrets for the frontend ingress.
+ ##
+ ingress:
+ enabled: false
+ className: "" # TODO(giolekva): contribute
+ ## E.g.
+ ## annotations:
+ ## kubernetes.io/ingress.class: nginx
+ ## kubernetes.io/tls-acme: "true"
+ ##
+ annotations:
+ {}
+ ## E.g.
+ ## hosts:
+ ## - host: penpot-example.local
+ hosts: []
+ ## E.g.
+ ## - secretName: chart-example-tls
+ ## hosts:
+ ## - chart-example.local
+ tls: []
+ ## @param frontend.affinity Affinity for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ##
+ affinity: {}
+ ## @param frontend.nodeSelector Node labels for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param frontend.tolerations Tolerations for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## Penpot frontend resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param frontend.resources.limits The resources limits for the Penpot frontend containers
+ ## @param frontend.resources.requests The requested resources for the Penpot frontend containers
+ ##
+ resources:
+ limits: {}
+ requests: {}
+
+## @section Exporter parameters
+
+## Penpot Exporter
+##
+exporter:
+ ## @param exporter.image.repository The Docker repository to pull the image from.
+ ## @param exporter.image.tag The image tag to use.
+ ## @param exporter.image.imagePullPolicy The image pull policy to use.
+ ##
+ image:
+ repository: penpotapp/exporter
+ tag: 1.16.0-beta
+ imagePullPolicy: IfNotPresent
+ ## @param exporter.replicaCount The number of replicas to deploy.
+ ##
+ replicaCount: 1
+ ## @param exporter.service.type The service type to create.
+ ## @param exporter.service.port The service port to use.
+ ##
+ service:
+ type: ClusterIP
+ port: 6061
+ ## Configure Pods Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param exporter.podSecurityContext.enabled Enabled Penpot pods' security context
+ ## @param exporter.podSecurityContext.fsGroup Set Penpot pod's security context fsGroup
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param exporter.containerSecurityContext.enabled Enabled Penpot containers' security context
+ ## @param exporter.containerSecurityContext.runAsUser Set Penpot containers' security context runAsUser
+ ## @param exporter.containerSecurityContext.allowPrivilegeEscalation Set Penpot containers' security context allowPrivilegeEscalation
+ ## @param exporter.containerSecurityContext.capabilities.drop Set Penpot containers' security context capabilities to be dropped
+ ## @param exporter.containerSecurityContext.readOnlyRootFilesystem Set Penpot containers' security context readOnlyRootFilesystem
+ ## @param exporter.containerSecurityContext.runAsNonRoot Set Penpot container's security context runAsNonRoot
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - all
+ readOnlyRootFilesystem: false
+ runAsNonRoot: true
+ ## @param exporter.affinity Affinity for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ##
+ affinity: {}
+ ## @param exporter.nodeSelector Node labels for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param exporter.tolerations Tolerations for Penpot pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## Penpot exporter resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param exporter.resources.limits The resources limits for the Penpot exporter containers
+ ## @param exporter.resources.requests The requested resources for the Penpot exporter containers
+ ##
+ resources:
+ limits: {}
+ requests: {}
+
+## @section Persistence parameters
+
+## Penpot persistence
+##
+persistence:
+ ## @param persistence.enabled Enable persistence using Persistent Volume Claims.
+ ##
+ enabled: false
+ ## @param persistence.storageClass Persistent Volume storage class.
+ ## If defined, storageClassName: <storageClass>.
+ ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner.
+ ##
+ storageClass: ""
+ ## @param persistence.size Persistent Volume size.
+ ##
+ size: 8Gi
+ ## @param persistence.existingClaim The name of an existing PVC to use for persistence.
+ ##
+ existingClaim: ""
+ ## @param persistence.accessModes Persistent Volume access modes.
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param persistence.annotations Persistent Volume Claim annotations.
+ ##
+ annotations: {}
+
+## @section Configuration parameters
+
+## Penpot configuration
+##
+config:
+ ## @param config.publicURI The public domain to serve Penpot on. Set `disable-secure-session-cookies` in the flags if you plan on serving it on a non HTTPS domain.
+ ## @param config.flags The feature flags to enable. Check [the official docs](https://help.penpot.app/technical-guide/configuration/) for more info.
+ ## @param config.apiSecretKey A random secret key needed for persistent user sessions. Generate with `openssl rand -hex 16` for example.
+ ##
+ publicURI: "http://localhost:8080"
+ flags: "enable-registration enable-login disable-demo-users disable-demo-warning"
+  apiSecretKey: "b46a12cb4bedc6b9df8cb3f18c708b65" # CHANGEME: example value only — generate your own (`openssl rand -hex 16`) before deploying
+ ## @param config.postgresql.host The PostgreSQL host to connect to.
+ ## @param config.postgresql.port The PostgreSQL host port to use.
+ ## @param config.postgresql.database The PostgreSQL database to use.
+ ## @param config.postgresql.username The database username to use.
+ ## @param config.postgresql.password The database password to use.
+ ## @param config.postgresql.existingSecret The name of an existing secret.
+ ## @param config.postgresql.secretKeys.usernameKey The username key to use from an existing secret.
+ ## @param config.postgresql.secretKeys.passwordKey The password key to use from an existing secret.
+ ##
+ postgresql:
+ host: "postgresql.penpot.svc.cluster.local"
+ port: 5432
+ username: ""
+ password: ""
+ database: ""
+ existingSecret: ""
+ secretKeys:
+ usernameKey: ""
+ passwordKey: ""
+ ## @param config.redis.host The Redis host to connect to.
+ ## @param config.redis.port The Redis host port to use.
+ ## @param config.redis.database The Redis database to connect to.
+ ##
+ redis:
+ host: "redis-headless.penpot.svc.cluster.local"
+ port: 6379
+ database: "0"
+ ## @param config.assets.storageBackend The storage backend for assets to use. Use `assets-fs` for filesystem, and `assets-s3` for S3.
+ ## @param config.assets.filesystem.directory The storage directory to use if you chose the filesystem storage backend.
+ ## @param config.assets.s3.accessKeyID The S3 access key ID to use if you chose the S3 storage backend.
+ ## @param config.assets.s3.secretAccessKey The S3 secret access key to use if you chose the S3 storage backend.
+ ## @param config.assets.s3.region The S3 region to use if you chose the S3 storage backend.
+ ## @param config.assets.s3.bucket The name of the S3 bucket to use if you chose the S3 storage backend.
+ ## @param config.assets.s3.endpointURI The S3 endpoint URI to use if you chose the S3 storage backend.
+ ## @param config.assets.s3.existingSecret The name of an existing secret.
+ ## @param config.assets.s3.secretKeys.accessKeyIDKey The S3 access key ID to use from an existing secret.
+ ## @param config.assets.s3.secretKeys.secretAccessKey The S3 secret access key to use from an existing secret.
+ ## @param config.assets.s3.secretKeys.endpointURIKey The S3 endpoint URI to use from an existing secret.
+ ##
+ assets:
+ storageBackend: "assets-fs"
+ filesystem:
+ directory: "/opt/data/assets"
+ s3:
+ accessKeyID: ""
+ secretAccessKey: ""
+ region: ""
+ bucket: ""
+ endpointURI: ""
+ existingSecret: ""
+ secretKeys:
+ accessKeyIDKey: ""
+ secretAccessKey: ""
+ endpointURIKey: ""
+ ## @param config.telemetryEnabled Whether to enable sending of anonymous telemetry data.
+ ##
+ telemetryEnabled: true
+ ## @param config.smtp.enabled Whether to enable SMTP configuration. You also need to add the 'enable-smtp' flag to the PENPOT_FLAGS variable.
+ ## @param config.smtp.defaultFrom The SMTP default email to send from.
+ ## @param config.smtp.defaultReplyTo The SMTP default email to reply to.
+ ## @param config.smtp.host The SMTP host to use.
+ ## @param config.smtp.port The SMTP host port to use.
+ ## @param config.smtp.username The SMTP username to use.
+ ## @param config.smtp.password The SMTP password to use.
+ ## @param config.smtp.tls Whether to use TLS for the SMTP connection.
+ ## @param config.smtp.ssl Whether to use SSL for the SMTP connection.
+ ## @param config.smtp.existingSecret The name of an existing secret.
+ ## @param config.smtp.secretKeys.usernameKey The SMTP username to use from an existing secret.
+ ## @param config.smtp.secretKeys.passwordKey The SMTP password to use from an existing secret.
+ ##
+ smtp:
+ enabled: false
+ defaultFrom: ""
+ defaultReplyTo: ""
+ host: ""
+ port: ""
+ username: ""
+ password: ""
+ tls: true
+ ssl: false
+ existingSecret: ""
+ secretKeys:
+ usernameKey: ""
+ passwordKey: ""
+ ## @param config.registrationDomainWhitelist Comma separated list of allowed domains to register. Empty to allow all domains.
+ ##
+ registrationDomainWhitelist: ""
+ ## Penpot Authentication providers parameters
+ ##
+ providers:
+ ## @param config.providers.google.enabled Whether to enable Google configuration. To enable Google auth, add `enable-login-with-google` to the flags.
+ ## @param config.providers.google.clientID The Google client ID to use. To enable Google auth, add `enable-login-with-google` to the flags.
+ ## @param config.providers.google.clientSecret The Google client secret to use. To enable Google auth, add `enable-login-with-google` to the flags.
+ ##
+ google:
+ enabled: false
+ clientID: ""
+ clientSecret: ""
+ ## @param config.providers.github.enabled Whether to enable GitHub configuration. To enable GitHub auth, also add `enable-login-with-github` to the flags.
+ ## @param config.providers.github.clientID The GitHub client ID to use.
+ ## @param config.providers.github.clientSecret The GitHub client secret to use.
+ ##
+ github:
+ enabled: false
+ clientID: ""
+ clientSecret: ""
+ ## @param config.providers.gitlab.enabled Whether to enable GitLab configuration. To enable GitLab auth, also add `enable-login-with-gitlab` to the flags.
+ ## @param config.providers.gitlab.baseURI The GitLab base URI to use.
+ ## @param config.providers.gitlab.clientID The GitLab client ID to use.
+ ## @param config.providers.gitlab.clientSecret The GitLab client secret to use.
+ ##
+ gitlab:
+ enabled: false
+ baseURI: "https://gitlab.com"
+ clientID: ""
+ clientSecret: ""
+ ## @param config.providers.oidc.enabled Whether to enable OIDC configuration. To enable OpenID Connect auth, also add `enable-login-with-oidc` to the flags.
+ ## @param config.providers.oidc.baseURI The OpenID Connect base URI to use.
+ ## @param config.providers.oidc.clientID The OpenID Connect client ID to use.
+ ## @param config.providers.oidc.clientSecret The OpenID Connect client secret to use.
+ ## @param config.providers.oidc.authURI Optional OpenID Connect auth URI to use. Auto discovered if not provided.
+ ## @param config.providers.oidc.tokenURI Optional OpenID Connect token URI to use. Auto discovered if not provided.
+ ## @param config.providers.oidc.userURI Optional OpenID Connect user URI to use. Auto discovered if not provided.
+ ## @param config.providers.oidc.roles Optional OpenID Connect roles to use. If no role is provided, roles checking disabled.
+ ## @param config.providers.oidc.rolesAttribute Optional OpenID Connect roles attribute to use. If not provided, the roles checking will be disabled.
+ ## @param config.providers.oidc.scopes Optional OpenID Connect scopes to use. This settings allow overwrite the required scopes, use with caution because penpot requres at least `name` and `email` attrs found on the user info. Optional, defaults to `openid profile`.
+ ## @param config.providers.oidc.nameAttribute Optional OpenID Connect name attribute to use. If not provided, the `name` prop will be used.
+ ## @param config.providers.oidc.emailAttribute Optional OpenID Connect email attribute to use. If not provided, the `email` prop will be used.
+ ##
+ oidc:
+ enabled: false
+ baseURI: ""
+ clientID: ""
+ clientSecret: ""
+ authURI: ""
+ tokenURI: ""
+ userURI: ""
+ roles: "role1 role2"
+ rolesAttribute: ""
+ scopes: "scope1 scope2"
+ nameAttribute: ""
+ emailAttribute: ""
+ ## @param config.providers.ldap.enabled Whether to enable LDAP configuration. To enable LDAP, also add `enable-login-with-ldap` to the flags.
+ ## @param config.providers.ldap.host The LDAP host to use.
+ ## @param config.providers.ldap.port The LDAP port to use.
+ ## @param config.providers.ldap.ssl Whether to use SSL for the LDAP connection.
+ ## @param config.providers.ldap.startTLS Whether to utilize StartTLS for the LDAP connection.
+ ## @param config.providers.ldap.baseDN The LDAP base DN to use.
+ ## @param config.providers.ldap.bindDN The LDAP bind DN to use.
+ ## @param config.providers.ldap.bindPassword The LDAP bind password to use.
+ ## @param config.providers.ldap.attributesUsername The LDAP attributes username to use.
+ ## @param config.providers.ldap.attributesEmail The LDAP attributes email to use.
+ ## @param config.providers.ldap.attributesFullname The LDAP attributes fullname to use.
+ ## @param config.providers.ldap.attributesPhoto The LDAP attributes photo format to use.
+ ##
+ ldap:
+ enabled: false
+ host: "ldap"
+ port: 10389
+ ssl: false
+ startTLS: false
+ baseDN: "ou=people,dc=planetexpress,dc=com"
+ bindDN: "cn=admin,dc=planetexpress,dc=com"
+ bindPassword: "GoodNewsEveryone"
+ attributesUsername: "uid"
+ attributesEmail: "mail"
+ attributesFullname: "cn"
+ attributesPhoto: "jpegPhoto"
+ ## @param config.providers.existingSecret The name of an existing secret to use.
+ ## @param config.providers.secretKeys.googleClientIDKey The Google client ID key to use from an existing secret.
+ ## @param config.providers.secretKeys.googleClientSecretKey The Google client secret key to use from an existing secret.
+ ## @param config.providers.secretKeys.githubClientIDKey The GitHub client ID key to use from an existing secret.
+ ## @param config.providers.secretKeys.githubClientSecretKey The GitHub client secret key to use from an existing secret.
+ ## @param config.providers.secretKeys.gitlabClientIDKey The GitLab client ID key to use from an existing secret.
+ ## @param config.providers.secretKeys.gitlabClientSecretKey The GitLab client secret key to use from an existing secret.
+ ## @param config.providers.secretKeys.oidcClientIDKey The OpenID Connect client ID key to use from an existing secret.
+ ## @param config.providers.secretKeys.oidcClientSecretKey The OpenID Connect client secret key to use from an existing secret.
+ ##
+ existingSecret: ""
+ secretKeys:
+ googleClientIDKey: ""
+ googleClientSecretKey: ""
+ githubClientIDKey: ""
+ githubClientSecretKey: ""
+ gitlabClientIDKey: ""
+ gitlabClientSecretKey: ""
+ oidcClientIDKey: ""
+ oidcClientSecretKey: ""
+
+## @section PostgreSQL configuration (Check for [more parameters here](https://artifacthub.io/packages/helm/bitnami/postgresql))
+
+## @param postgresql.auth.username Name for a custom user to create.
+## @param postgresql.auth.password Password for the custom user to create.
+## @param postgresql.auth.database Name for a custom database to create.
+##
+postgresql:
+ auth:
+ username: example
+ password: secretpassword
+ database: penpot
+
+## @section Redis configuration (Check for [more parameters here](https://artifacthub.io/packages/helm/bitnami/redis))
+
+## @param redis.auth.enabled Whether to enable password authentication.
+##
+redis:
+ auth:
+ enabled: false
diff --git a/charts/pihole/.helmignore b/charts/pihole/.helmignore
new file mode 100644
index 0000000..4c2748c
--- /dev/null
+++ b/charts/pihole/.helmignore
@@ -0,0 +1,27 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+# Manually added entries
+ci/
+examples/
+Makefile
+README.md.gotmpl
diff --git a/charts/pihole/Chart.yaml b/charts/pihole/Chart.yaml
new file mode 100644
index 0000000..8b087a8
--- /dev/null
+++ b/charts/pihole/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+appVersion: 2023.11.0
+description: Installs pihole in kubernetes
+home: https://github.com/MoJo2600/pihole-kubernetes/tree/master/charts/pihole
+icon: https://i2.wp.com/pi-hole.net/wp-content/uploads/2016/12/Vortex-R.png
+maintainers:
+- email: christian.erhardt@mojo2k.de
+ name: MoJo2600
+name: pihole
+sources:
+- https://github.com/MoJo2600/pihole-kubernetes/tree/master/charts/pihole
+- https://pi-hole.net/
+- https://github.com/pi-hole
+- https://github.com/pi-hole/docker-pi-hole
+version: 2.20.1
diff --git a/charts/pihole/README.md b/charts/pihole/README.md
new file mode 100644
index 0000000..610a521
--- /dev/null
+++ b/charts/pihole/README.md
@@ -0,0 +1,403 @@
+# pihole
+
+Installs pihole in kubernetes
+
+  <!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
+[](#contributors-)
+<!-- ALL-CONTRIBUTORS-BADGE:END -->
+
+## Source Code
+
+* <https://github.com/MoJo2600/pihole-kubernetes/tree/master/charts/pihole>
+* <https://pi-hole.net/>
+* <https://github.com/pi-hole>
+* <https://github.com/pi-hole/docker-pi-hole>
+
+## Installation
+
+Jeff Geerling on YouTube made a video about the installation of this chart:
+
+[](https://youtu.be/IafVCHkJbtI?t=2655)
+
+### Add Helm repository
+
+```shell
+helm repo add mojo2600 https://mojo2600.github.io/pihole-kubernetes/
+helm repo update
+```
+
+### Configure the chart
+
+The following items can be set via `--set` flag during installation or configured by editing the `values.yaml` directly.
+
+#### Configure the way how to expose pihole service:
+
+- **Ingress**: The ingress controller must be installed in the Kubernetes cluster.
+- **ClusterIP**: Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster.
+- **LoadBalancer**: Exposes the service externally using a cloud provider’s load balancer.
+
+## My settings in values.yaml
+
+```console
+dnsmasq:
+ customDnsEntries:
+ - address=/nas/192.168.178.10
+
+ customCnameEntries:
+ - cname=foo.nas,nas
+
+persistentVolumeClaim:
+ enabled: true
+
+serviceWeb:
+ loadBalancerIP: 192.168.178.252
+ annotations:
+ metallb.universe.tf/allow-shared-ip: pihole-svc
+ type: LoadBalancer
+
+serviceDns:
+ loadBalancerIP: 192.168.178.252
+ annotations:
+ metallb.universe.tf/allow-shared-ip: pihole-svc
+ type: LoadBalancer
+```
+
+## Configuring Upstream DNS Resolvers
+
+By default, `pihole-kubernetes` will configure pod DNS automatically to use Google's `8.8.8.8` nameserver for upstream
+DNS resolution. You can configure this, or opt-out of pod DNS configuration completely.
+
+### Changing The Upstream DNS Resolver
+
+For example, to use Cloudflare's resolver:
+
+```yaml
+podDnsConfig:
+ enabled: true
+ policy: "None"
+ nameservers:
+ - 127.0.0.1
+ - 1.1.1.1
+```
+
+### Disabling Pod DNS Configuration
+
+If you have other DNS policy at play (for example, when running a service mesh), you may not want to have
+`pihole-kubernetes` control this behavior. In that case, you can disable DNS configuration on `pihole` pods:
+
+```yaml
+podDnsConfig:
+ enabled: false
+```
+
+## Upgrading
+
+### To 2.0.0
+
+This version splits the DHCP service into its own resource and puts the configuration to `serviceDhcp`.
+
+**If you have not changed any configuration for `serviceDns`, you don’t need to do anything.**
+
+If you have changed your `serviceDns` configuration, **copy** your `serviceDns` section into a new `serviceDhcp` section.
+
+### To 1.8.22
+
+To enhance compatibility for Traefik, we split the TCP and UDP service into Web and DNS. This means, if you have a dedicated configuration for the service, you have to
+update your `values.yaml` and add a new configuration for this new service.
+
+Before (In my case, with metallb):
+```
+serviceTCP:
+ loadBalancerIP: 192.168.178.252
+ annotations:
+ metallb.universe.tf/allow-shared-ip: pihole-svc
+
+serviceUDP:
+ loadBalancerIP: 192.168.178.252
+ annotations:
+ metallb.universe.tf/allow-shared-ip: pihole-svc
+```
+
+After:
+```
+serviceWeb:
+ loadBalancerIP: 192.168.178.252
+ annotations:
+ metallb.universe.tf/allow-shared-ip: pihole-svc
+
+serviceDns:
+ loadBalancerIP: 192.168.178.252
+ annotations:
+ metallb.universe.tf/allow-shared-ip: pihole-svc
+```
+
+Version 1.8.22 has switched from the deprecated ingress api `extensions/v1beta1` to the go forward version `networking.k8s.io/v1`. This means that your cluster must be running 1.19.x as this api is not available on older versions. If necessary to run on an older Kubernetes Version, it can be done by modifying the ingress.yaml and changing the api definition back. The backend definition would also change from:
+
+```
+ backend:
+ service:
+ name: \{\{ $serviceName \}\}
+ port:
+ name: http
+```
+to:
+```
+ backend:
+ serviceName: \{\{ $serviceName \}\}
+ servicePort: http
+```
+
+## Uninstallation
+
+To uninstall/delete the `my-release` deployment (NOTE: `--purge` is default behaviour in Helm 3+ and will error):
+
+```bash
+helm delete --purge my-release
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the pihole chart and the default values.
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| DNS1 | string | `"8.8.8.8"` | default upstream DNS 1 server to use |
+| DNS2 | string | `"8.8.4.4"` | default upstream DNS 2 server to use |
+| adlists | object | `{}` | list of adlists to import during initial start of the container |
+| admin | object | `{"existingSecret":"","passwordKey":"password"}` | Use an existing secret for the admin password. |
+| admin.enabled | bool | `true` | If set to false admin password will be disabled, adminPassword specified above and the pre-existing secret (if specified) will be ignored. |
+| admin.existingSecret | string | `""` | Specify an existing secret to use as admin password |
+| admin.passwordKey | string | `"password"` | Specify the key inside the secret to use |
+| adminPassword | string | `"admin"` | Administrator password when not using an existing secret (see below) |
+| affinity | object | `{}` | |
+| antiaff.avoidRelease | string | `"pihole1"` | Here you can set the pihole release (you set in `helm install <releasename> ...`) you want to avoid |
+| antiaff.enabled | bool | `false` | set to true to enable antiaffinity (example: 2 pihole DNS in the same cluster) |
+| antiaff.strict | bool | `true` | Here you can choose between preferred or required |
+| antiaff.namespaces | list | `[]` | list of namespaces to include in anti-affinity settings |
+| blacklist | object | `{}` | list of blacklisted domains to import during initial start of the container |
+| customVolumes.config | object | `{}` | any volume type can be used here |
+| customVolumes.enabled | bool | `false` | set this to true to enable custom volumes |
+| dnsHostPort.enabled | bool | `false` | set this to true to enable dnsHostPort |
+| dnsHostPort.port | int | `53` | default port for this pod |
+| dnsmasq.additionalHostsEntries | list | `[]` | Dnsmasq reads the /etc/hosts file to resolve ips. You can add additional entries if you like |
+| dnsmasq.customCnameEntries | list | `[]` | Here we specify custom cname entries that should point to `A` records or elements in customDnsEntries array. The format should be: - cname=cname.foo.bar,foo.bar - cname=cname.bar.foo,bar.foo - cname=cname record,dns record |
+| dnsmasq.customDnsEntries | list | `[]` | Add custom dns entries to override the dns resolution. All lines will be added to the pihole dnsmasq configuration. |
+| dnsmasq.customSettings | string | `nil` | Other options |
+| dnsmasq.staticDhcpEntries | list | `[]` | Static DHCP config |
+| dnsmasq.upstreamServers | list | `[]` | Add upstream dns servers. All lines will be added to the pihole dnsmasq configuration |
+| doh.enabled | bool | `false` | set to true to enabled DNS over HTTPs via cloudflared |
+| doh.envVars | object | `{}` | Here you can pass environment variables to the DoH container, for example: |
+| doh.name | string | `"cloudflared"` | |
+| doh.probes | object | `{"liveness":{"enabled":true,"failureThreshold":10,"initialDelaySeconds":60,"probe":{"exec":{"command":["nslookup","-po=5053","cloudflare.com","127.0.0.1"]}},"timeoutSeconds":5}}` | Probes configuration |
+| doh.probes.liveness | object | `{"enabled":true,"failureThreshold":10,"initialDelaySeconds":60,"probe":{"exec":{"command":["nslookup","-po=5053","cloudflare.com","127.0.0.1"]}},"timeoutSeconds":5}` | Configure the healthcheck for the doh container |
+| doh.probes.liveness.enabled | bool | `true` | set to true to enable liveness probe |
+| doh.probes.liveness.failureThreshold | int | `10` | defines the failure threshold for the liveness probe |
+| doh.probes.liveness.initialDelaySeconds | int | `60` | defines the initial delay for the liveness probe |
+| doh.probes.liveness.probe | object | `{"exec":{"command":["nslookup","-po=5053","cloudflare.com","127.0.0.1"]}}` | customize the liveness probe |
+| doh.probes.liveness.timeoutSeconds | int | `5` | defines the timeout in seconds for the liveness probe |
+| doh.pullPolicy | string | `"IfNotPresent"` | |
+| doh.repository | string | `"crazymax/cloudflared"` | |
+| doh.tag | string | `"latest"` | |
+| dualStack.enabled | bool | `false` | set this to true to enable creation of DualStack services or creation of separate IPv6 services if `serviceDns.type` is set to `"LoadBalancer"` |
+| extraEnvVars | object | `{}` | extraEnvironmentVars is a list of extra environment variables to set for pihole to use |
+| extraEnvVarsSecret | object | `{}` | extraEnvVarsSecret is a list of secrets to load in as environment variables. |
+| extraInitContainers | list | `[]` | any initContainers you might want to run before starting pihole |
+| extraObjects | list | `[]` | any extra kubernetes manifests you might want |
+| extraVolumeMounts | object | `{}` | any extra volume mounts you might want |
+| extraVolumes | object | `{}` | any extra volumes you might want |
+| ftl | object | `{}` | values that should be added to pihole-FTL.conf |
+| hostNetwork | string | `"false"` | should the container use host network |
+| hostname | string | `""` | hostname of pod |
+| image.pullPolicy | string | `"IfNotPresent"` | the pull policy |
+| image.repository | string | `"pihole/pihole"` | the repository to pull the image from |
+| image.tag | string | `""` | the docker tag, if left empty it will get it from the chart's appVersion |
+| ingress | object | `{"annotations":{},"enabled":false,"hosts":["chart-example.local"],"path":"/","tls":[]}` | Configuration for the Ingress |
+| ingress.annotations | object | `{}` | Annotations for the ingress |
+| ingress.enabled | bool | `false` | Generate a Ingress resource |
+| maxSurge | int | `1` | The maximum number of Pods that can be created over the desired number of `ReplicaSet` during updating. |
+| maxUnavailable | int | `1` | The maximum number of Pods that can be unavailable during updating |
+| monitoring.podMonitor | object | `{"enabled":false}` | Preferably adding prometheus scrape annotations rather than enabling podMonitor. |
+| monitoring.podMonitor.enabled | bool | `false` | set this to true to enable podMonitor |
+| monitoring.sidecar | object | `{"enabled":false,"image":{"pullPolicy":"IfNotPresent","repository":"ekofr/pihole-exporter","tag":"v0.3.0"},"port":9617,"resources":{"limits":{"memory":"128Mi"}}}` | Sidecar configuration |
+| monitoring.sidecar.enabled | bool | `false` | set this to true to enable podMonitor as sidecar |
+| nodeSelector | object | `{}` | |
+| persistentVolumeClaim | object | `{"accessModes":["ReadWriteOnce"],"annotations":{},"enabled":false,"size":"500Mi"}` | `spec.PersitentVolumeClaim` configuration |
+| persistentVolumeClaim.annotations | object | `{}` | Annotations for the `PersitentVolumeClaim` |
+| persistentVolumeClaim.enabled | bool | `false` | set to true to use pvc |
+| podAnnotations | object | `{}` | Additional annotations for pods |
+| podDnsConfig.enabled | bool | `true` | |
+| podDnsConfig.nameservers[0] | string | `"127.0.0.1"` | |
+| podDnsConfig.nameservers[1] | string | `"8.8.8.8"` | |
+| podDnsConfig.policy | string | `"None"` | |
+| privileged | string | `"false"` | should container run in privileged mode |
+| capabilities | object | `{}` | Linux capabilities that container should run with |
+| probes | object | `{"liveness":{"type": "httpGet","enabled":true,"failureThreshold":10,"initialDelaySeconds":60,"port":"http","scheme":"HTTP","timeoutSeconds":5},"readiness":{"enabled":true,"failureThreshold":3,"initialDelaySeconds":60,"port":"http","scheme":"HTTP","timeoutSeconds":5}}` | Probes configuration |
+| probes.liveness.enabled | bool | `true` | Generate a liveness probe |
+| probes.liveness.type | string | `httpGet` | Defines the type of liveness probe. (httpGet, command) |
+| probes.liveness.command | list | [] | A list of commands to execute as a liveness probe (Requires `type` to be set to `command`) |
+| probes.readiness.enabled | bool | `true` | Generate a readiness probe |
+| regex | object | `{}` | list of blacklisted regex expressions to import during initial start of the container |
+| replicaCount | int | `1` | The number of replicas |
+| resources | object | `{}` | lines, adjust them as necessary, and remove the curly braces after 'resources:'. |
+| serviceDhcp | object | `{"annotations":{},"enabled":true,"externalTrafficPolicy":"Local","loadBalancerIP":"","loadBalancerIPv6":"","nodePort":"","port":67,"type":"NodePort"}` | Configuration for the DHCP service on port 67 |
+| serviceDhcp.annotations | object | `{}` | Annotations for the DHCP service |
+| serviceDhcp.enabled | bool | `true` | Generate a Service resource for DHCP traffic |
+| serviceDhcp.externalTrafficPolicy | string | `"Local"` | `spec.externalTrafficPolicy` for the DHCP Service |
+| serviceDhcp.loadBalancerIP | string | `""` | A fixed `spec.loadBalancerIP` for the DHCP Service |
+| serviceDhcp.loadBalancerIPv6 | string | `""` | A fixed `spec.loadBalancerIP` for the IPv6 DHCP Service |
+| serviceDhcp.nodePort | string | `""` | Optional node port for the DHCP service |
+| serviceDhcp.port | int | `67` | The port of the DHCP service |
+| serviceDhcp.type | string | `"NodePort"` | `spec.type` for the DHCP Service |
+| serviceDns | object | `{"annotations":{},"externalTrafficPolicy":"Local","loadBalancerIP":"","loadBalancerIPv6":"","mixedService":false,"nodePort":"","port":53,"type":"NodePort"}` | Configuration for the DNS service on port 53 |
+| serviceDns.annotations | object | `{}` | Annotations for the DNS service |
+| serviceDns.externalTrafficPolicy | string | `"Local"` | `spec.externalTrafficPolicy` for the DHCP Service |
+| serviceDns.loadBalancerIP | string | `""` | A fixed `spec.loadBalancerIP` for the DNS Service |
+| serviceDns.loadBalancerIPv6 | string | `""` | A fixed `spec.loadBalancerIP` for the IPv6 DNS Service |
+| serviceDns.mixedService | bool | `false` | deploys a mixed (TCP + UDP) Service instead of separate ones |
+| serviceDns.nodePort | string | `""` | Optional node port for the DNS service |
+| serviceDns.port | int | `53` | The port of the DNS service |
+| serviceDns.type | string | `"NodePort"` | `spec.type` for the DNS Service |
+| serviceWeb | object | `{"annotations":{},"externalTrafficPolicy":"Local","http":{"enabled":true,"nodePort":"","port":80},"https":{"enabled":true,"nodePort":"","port":443},"loadBalancerIP":"","loadBalancerIPv6":"","type":"ClusterIP"}` | Configuration for the web interface service |
+| serviceWeb.annotations | object | `{}` | Annotations for the DHCP service |
+| serviceWeb.externalTrafficPolicy | string | `"Local"` | `spec.externalTrafficPolicy` for the web interface Service |
+| serviceWeb.http | object | `{"enabled":true,"nodePort":"","port":80}` | Configuration for the HTTP web interface listener |
+| serviceWeb.http.enabled | bool | `true` | Generate a service for HTTP traffic |
+| serviceWeb.http.nodePort | string | `""` | Optional node port for the web HTTP service |
+| serviceWeb.http.port | int | `80` | The port of the web HTTP service |
+| serviceWeb.https | object | `{"enabled":true,"nodePort":"","port":443}` | Configuration for the HTTPS web interface listener |
+| serviceWeb.https.enabled | bool | `true` | Generate a service for HTTPS traffic |
+| serviceWeb.https.nodePort | string | `""` | Optional node port for the web HTTPS service |
+| serviceWeb.https.port | int | `443` | The port of the web HTTPS service |
+| serviceWeb.loadBalancerIP | string | `""` | A fixed `spec.loadBalancerIP` for the web interface Service |
+| serviceWeb.loadBalancerIPv6 | string | `""` | A fixed `spec.loadBalancerIP` for the IPv6 web interface Service |
+| serviceWeb.type | string | `"ClusterIP"` | `spec.type` for the web interface Service |
+| strategyType | string | `"RollingUpdate"` | The `spec.strategyType` for updates |
+| tolerations | list | `[]` | |
+| topologySpreadConstraints | list | `[]` | Specify a priorityClassName priorityClassName: "" Reference: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ |
+| virtualHost | string | `"pi.hole"` | |
+| webHttp | string | `"80"` | port the container should use to expose HTTP traffic |
+| webHttps | string | `"443"` | port the container should use to expose HTTPS traffic |
+| whitelist | object | `{}` | list of whitelisted domains to import during initial start of the container |
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| MoJo2600 | <christian.erhardt@mojo2k.de> | |
+
+## Remarks
+
+### MetalLB 0.8.1+
+
+pihole seems to work without issue in MetalLB 0.8.1+
+
+### MetalLB 0.7.3
+
+MetalLB 0.7.3 has a bug, where the service is not announced anymore, when the pod changes (e.g. update of a deployment). My workaround is to restart the `metallb-speaker-*` pods.
+
+## Credits
+
+[Pi-hole®](https://pi-hole.net/)
+
+## Contributing
+
+Feel free to contribute by making a [pull request](https://github.com/MoJo2600/pihole-kubernetes/pull/new/master).
+
+Please read [Contribution Guide](../../CONTRIBUTING.md) for more information on how you can contribute to this Chart.
+
+## Contributors ✨
+
+Thanks goes to these wonderful people:
+
+<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
+<!-- prettier-ignore-start -->
+<!-- markdownlint-disable -->
+<table>
+ <tr>
+ <td align="center"><a href="http://www.mojo2k.de"><img src="https://avatars1.githubusercontent.com/u/2462817?v=4" width="100px;" alt=""/><br /><sub><b>Christian Erhardt</b></sub></a></td>
+ <td align="center"><a href="https://billimek.com/"><img src="https://avatars0.githubusercontent.com/u/6393612?v=4" width="100px;" alt=""/><br /><sub><b>Jeff Billimek</b></sub></a></td>
+ <td align="center"><a href="https://github.com/imle"><img src="https://avatars3.githubusercontent.com/u/4809109?v=4" width="100px;" alt=""/><br /><sub><b>Steven Imle</b></sub></a></td>
+ <td align="center"><a href="https://github.com/jetersen"><img src="https://avatars2.githubusercontent.com/u/1661688?v=4" width="100px;" alt=""/><br /><sub><b>Joseph Petersen</b></sub></a></td>
+ <td align="center"><a href="https://github.com/SiM22"><img src="https://avatars2.githubusercontent.com/u/5759618?v=4" width="100px;" alt=""/><br /><sub><b>Simon Garcia</b></sub></a></td>
+ <td align="center"><a href="https://github.com/AndyG-0"><img src="https://avatars1.githubusercontent.com/u/29743443?v=4" width="100px;" alt=""/><br /><sub><b>Andy Gilbreath</b></sub></a></td>
+ <td align="center"><a href="https://github.com/northerngit"><img src="https://avatars0.githubusercontent.com/u/4513272?v=4" width="100px;" alt=""/><br /><sub><b>James Wilson</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://github.com/jskswamy"><img src="https://avatars2.githubusercontent.com/u/232449?v=4" width="100px;" alt=""/><br /><sub><b>Krishnaswamy Subramanian</b></sub></a></td>
+ <td align="center"><a href="https://github.com/luqasn"><img src="https://avatars2.githubusercontent.com/u/274902?v=4" width="100px;" alt=""/><br /><sub><b>Lucas Romero</b></sub></a></td>
+ <td align="center"><a href="https://github.com/konturn"><img src="https://avatars0.githubusercontent.com/u/35545508?v=4" width="100px;" alt=""/><br /><sub><b>konturn</b></sub></a></td>
+ <td align="center"><a href="https://github.com/tdorsey"><img src="https://avatars3.githubusercontent.com/u/1218404?v=4" width="100px;" alt=""/><br /><sub><b>tdorsey</b></sub></a></td>
+ <td align="center"><a href="https://github.com/alesz"><img src="https://avatars0.githubusercontent.com/u/12436980?v=4" width="100px;" alt=""/><br /><sub><b>Ales Zelenik</b></sub></a></td>
+ <td align="center"><a href="https://github.com/dtourde"><img src="https://avatars1.githubusercontent.com/u/49169262?v=4" width="100px;" alt=""/><br /><sub><b>Damien TOURDE</b></sub></a></td>
+ <td align="center"><a href="https://github.com/putz612"><img src="https://avatars3.githubusercontent.com/u/952758?v=4" width="100px;" alt=""/><br /><sub><b>Jason Sievert</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://github.com/joshua-nord"><img src="https://avatars2.githubusercontent.com/u/1181300?v=4" width="100px;" alt=""/><br /><sub><b>joshua-nord</b></sub></a></td>
+ <td align="center"><a href="https://maximilianbo.de/"><img src="https://avatars3.githubusercontent.com/u/9051309?v=4" width="100px;" alt=""/><br /><sub><b>Maximilian Bode</b></sub></a></td>
+ <td align="center"><a href="https://github.com/raackley"><img src="https://avatars0.githubusercontent.com/u/1700688?v=4" width="100px;" alt=""/><br /><sub><b>raackley</b></sub></a></td>
+ <td align="center"><a href="https://github.com/StoicPerlman"><img src="https://avatars1.githubusercontent.com/u/3152359?v=4" width="100px;" alt=""/><br /><sub><b>Sam Kleiner</b></sub></a></td>
+ <td align="center"><a href="https://arpankapoor.com/"><img src="https://avatars3.githubusercontent.com/u/3677810?v=4" width="100px;" alt=""/><br /><sub><b>Arpan Kapoor</b></sub></a></td>
+ <td align="center"><a href="https://github.com/chrodriguez"><img src="https://avatars1.githubusercontent.com/u/1460882?v=4" width="100px;" alt=""/><br /><sub><b>Christian Rodriguez</b></sub></a></td>
+ <td align="center"><a href="http://dave-cahill.com/"><img src="https://avatars0.githubusercontent.com/u/361096?v=4" width="100px;" alt=""/><br /><sub><b>Dave Cahill</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://github.com/golgoth31"><img src="https://avatars2.githubusercontent.com/u/5741421?v=4" width="100px;" alt=""/><br /><sub><b>golgoth31</b></sub></a></td>
+ <td align="center"><a href="https://greg.jeanmart.me/"><img src="https://avatars3.githubusercontent.com/u/506784?v=4" width="100px;" alt=""/><br /><sub><b>Greg Jeanmart</b></sub></a></td>
+ <td align="center"><a href="https://github.com/ballj"><img src="https://avatars1.githubusercontent.com/u/38097813?v=4" width="100px;" alt=""/><br /><sub><b>Joseph Ball</b></sub></a></td>
+ <td align="center"><a href="http://www.oneko.org/"><img src="https://avatars2.githubusercontent.com/u/4233214?v=4" width="100px;" alt=""/><br /><sub><b>Karlos</b></sub></a></td>
+ <td align="center"><a href="https://github.com/dza89"><img src="https://avatars0.githubusercontent.com/u/20373984?v=4" width="100px;" alt=""/><br /><sub><b>dza89</b></sub></a></td>
+ <td align="center"><a href="https://github.com/mikewhitley"><img src="https://avatars0.githubusercontent.com/u/52802633?v=4" width="100px;" alt=""/><br /><sub><b>mikewhitley</b></sub></a></td>
+ <td align="center"><a href="https://github.com/Vashiru"><img src="https://avatars2.githubusercontent.com/u/11370057?v=4" width="100px;" alt=""/><br /><sub><b>Vashiru</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://github.com/sam-kleiner"><img src="https://avatars.githubusercontent.com/u/63059772?v=4" width="100px;" alt=""/><br /><sub><b>sam-kleiner</b></sub></a></td>
+ <td align="center"><a href="https://www.linkedin.com/in/alexgorbatchev/"><img src="https://avatars.githubusercontent.com/u/65633?v=4" width="100px;" alt=""/><br /><sub><b>Alex Gorbatchev</b></sub></a></td>
+ <td align="center"><a href="https://github.com/c-yco"><img src="https://avatars.githubusercontent.com/u/355591?v=4" width="100px;" alt=""/><br /><sub><b>Alexander Rabenstein</b></sub></a></td>
+ <td align="center"><a href="http://tibbon.com/"><img src="https://avatars.githubusercontent.com/u/82880?v=4" width="100px;" alt=""/><br /><sub><b>David Fisher</b></sub></a></td>
+ <td align="center"><a href="https://github.com/utkuozdemir"><img src="https://avatars.githubusercontent.com/u/1465819?v=4" width="100px;" alt=""/><br /><sub><b>Utku Özdemir</b></sub></a></td>
+ <td align="center"><a href="https://mor.re/"><img src="https://avatars.githubusercontent.com/u/7683567?v=4" width="100px;" alt=""/><br /><sub><b>Morre Meyer</b></sub></a></td>
+ <td align="center"><a href="https://github.com/johnsondnz"><img src="https://avatars.githubusercontent.com/u/7608966?v=4" width="100px;" alt=""/><br /><sub><b>Donald Johnson</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://winston.milli.ng/"><img src="https://avatars.githubusercontent.com/u/6162814?v=4" width="100px;" alt=""/><br /><sub><b>Winston R. Milling</b></sub></a></td>
+ <td align="center"><a href="https://github.com/larivierec"><img src="https://avatars.githubusercontent.com/u/3633214?v=4" width="100px;" alt=""/><br /><sub><b>Christopher Larivière</b></sub></a></td>
+ <td align="center"><a href="https://sievenpiper.co/"><img src="https://avatars.githubusercontent.com/u/1131882?v=4" width="100px;" alt=""/><br /><sub><b>Justin Sievenpiper</b></sub></a></td>
+ <td align="center"><a href="https://github.com/beastob"><img src="https://avatars.githubusercontent.com/u/76816315?v=4" width="100px;" alt=""/><br /><sub><b>beastob</b></sub></a></td>
+ <td align="center"><a href="https://niftyside.io/"><img src="https://avatars.githubusercontent.com/u/653739?v=4" width="100px;" alt=""/><br /><sub><b>Daniel Mühlbachler-Pietrzykowski</b></sub></a></td>
+ <td align="center"><a href="https://github.com/consideRatio"><img src="https://avatars.githubusercontent.com/u/3837114?v=4" width="100px;" alt=""/><br /><sub><b>Erik Sundell</b></sub></a></td>
+ <td align="center"><a href="https://github.com/Ornias1993"><img src="https://avatars.githubusercontent.com/u/7613738?v=4" width="100px;" alt=""/><br /><sub><b>Kjeld Schouten-Lebbing</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://github.com/mrwulf"><img src="https://avatars.githubusercontent.com/u/2494769?v=4" width="100px;" alt=""/><br /><sub><b>Brandon Wulf</b></sub></a></td>
+ <td align="center"><a href="https://github.com/DerRockWolf"><img src="https://avatars.githubusercontent.com/u/50499906?v=4" width="100px;" alt=""/><br /><sub><b>DerRockWolf</b></sub></a></td>
+ <td align="center"><a href="https://github.com/brnl"><img src="https://avatars.githubusercontent.com/u/3243133?v=4" width="100px;" alt=""/><br /><sub><b>brnl</b></sub></a></td>
+ <td align="center"><a href="https://rafaelgaspar.xyz/"><img src="https://avatars.githubusercontent.com/u/5567?v=4" width="100px;" alt=""/><br /><sub><b>Rafael Gaspar</b></sub></a></td>
+ <td align="center"><a href="https://chadimasri.com/"><img src="https://avatars.githubusercontent.com/u/1502811?v=4" width="100px;" alt=""/><br /><sub><b>Chadi El Masri</b></sub></a></td>
+ <td align="center"><a href="https://github.com/dfoulkes"><img src="https://avatars.githubusercontent.com/u/8113674?v=4" width="100px;" alt=""/><br /><sub><b>Dan Foulkes</b></sub></a></td>
+ <td align="center"><a href="https://github.com/george124816"><img src="https://avatars.githubusercontent.com/u/26443736?v=4" width="100px;" alt=""/><br /><sub><b>George Rodrigues</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://pascaliske.dev/"><img src="https://avatars.githubusercontent.com/u/7473880?v=4" width="100px;" alt=""/><br /><sub><b>Pascal Iske</b></sub></a></td>
+ <td align="center"><a href="https://www.reyth.dev/"><img src="https://avatars.githubusercontent.com/u/23526880?v=4" width="100px;" alt=""/><br /><sub><b>Theo REY</b></sub></a></td>
+ <td align="center"><a href="https://github.com/piwi3910"><img src="https://avatars.githubusercontent.com/u/12539757?v=4" width="100px;" alt=""/><br /><sub><b>Watteel Pascal</b></sub></a></td>
+ <td align="center"><a href="https://github.com/frittenlab"><img src="https://avatars.githubusercontent.com/u/29921946?v=4" width="100px;" alt=""/><br /><sub><b>simon</b></sub></a></td>
+ <td align="center"><a href="https://github.com/FernFerret"><img src="https://avatars.githubusercontent.com/u/72811?v=4" width="100px;" alt=""/><br /><sub><b>Eric</b></sub></a></td>
+ <td align="center"><a href="https://github.com/vince-vibin"><img src="https://avatars.githubusercontent.com/u/99386370?v=4" width="100px;" alt=""/><br /><sub><b>Vincent</b></sub></a></td>
+ <td align="center"><a href="https://github.com/Keydrain"><img src="https://avatars.githubusercontent.com/u/5723055?v=4" width="100px;" alt=""/><br /><sub><b>Clint</b></sub></a></td>
+ </tr>
+ <tr>
+ <td align="center"><a href="https://github.com/tamcore"><img src="https://avatars.githubusercontent.com/u/319917?v=4" width="100px;" alt=""/><br /><sub><b>Philipp B.</b></sub></a></td>
+ </tr>
+</table>
+
+<!-- markdownlint-restore -->
+<!-- prettier-ignore-end -->
+
+<!-- ALL-CONTRIBUTORS-LIST:END -->
+
+This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0)
diff --git a/charts/pihole/templates/NOTES.txt b/charts/pihole/templates/NOTES.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charts/pihole/templates/NOTES.txt
diff --git a/charts/pihole/templates/_helpers.tpl b/charts/pihole/templates/_helpers.tpl
new file mode 100644
index 0000000..72aef75
--- /dev/null
+++ b/charts/pihole/templates/_helpers.tpl
@@ -0,0 +1,39 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "pihole.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "pihole.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "pihole.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Default password secret name.
+*/}}
+{{- define "pihole.password-secret" -}}
+{{- printf "%s-%s" (include "pihole.fullname" .) "password" | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/charts/pihole/templates/configmap-adlists.yaml b/charts/pihole/templates/configmap-adlists.yaml
new file mode 100644
index 0000000..7a496f0
--- /dev/null
+++ b/charts/pihole/templates/configmap-adlists.yaml
@@ -0,0 +1,16 @@
+{{ if .Values.adlists }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "pihole.fullname" . }}-adlists
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ adlists.list: |
+ {{- range .Values.adlists }}
+ {{ . }}
+ {{- end }}
+{{ end }}
diff --git a/charts/pihole/templates/configmap-blacklist.yaml b/charts/pihole/templates/configmap-blacklist.yaml
new file mode 100644
index 0000000..d34b964
--- /dev/null
+++ b/charts/pihole/templates/configmap-blacklist.yaml
@@ -0,0 +1,16 @@
+{{ if .Values.blacklist }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "pihole.fullname" . }}-blacklist
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ blacklist.txt: |
+ {{- range .Values.blacklist }}
+ {{ . }}
+ {{- end }}
+{{ end }}
diff --git a/charts/pihole/templates/configmap-regex.yaml b/charts/pihole/templates/configmap-regex.yaml
new file mode 100644
index 0000000..9d3bd6b
--- /dev/null
+++ b/charts/pihole/templates/configmap-regex.yaml
@@ -0,0 +1,16 @@
+{{ if .Values.regex }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "pihole.fullname" . }}-regex
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ regex.list: |
+ {{- range .Values.regex }}
+ {{ . }}
+ {{- end }}
+{{ end }}
diff --git a/charts/pihole/templates/configmap-static-dhcp.yaml b/charts/pihole/templates/configmap-static-dhcp.yaml
new file mode 100644
index 0000000..c0005f5
--- /dev/null
+++ b/charts/pihole/templates/configmap-static-dhcp.yaml
@@ -0,0 +1,16 @@
+{{ if .Values.dnsmasq.staticDhcpEntries }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "pihole.fullname" . }}-static-dhcp
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ pihole-static-dhcp.conf: |
+ {{- range .Values.dnsmasq.staticDhcpEntries }}
+ {{ . }}
+ {{- end }}
+{{ end }}
diff --git a/charts/pihole/templates/configmap-whitelist.yaml b/charts/pihole/templates/configmap-whitelist.yaml
new file mode 100644
index 0000000..ecd953d
--- /dev/null
+++ b/charts/pihole/templates/configmap-whitelist.yaml
@@ -0,0 +1,16 @@
+{{ if .Values.whitelist }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "pihole.fullname" . }}-whitelist
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ whitelist.txt: |
+ {{- range .Values.whitelist }}
+ {{ . }}
+ {{- end }}
+{{ end }}
diff --git a/charts/pihole/templates/configmap.yaml b/charts/pihole/templates/configmap.yaml
new file mode 100644
index 0000000..af63f87
--- /dev/null
+++ b/charts/pihole/templates/configmap.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "pihole.fullname" . }}-custom-dnsmasq
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ 02-custom.conf: |
+ addn-hosts=/etc/addn-hosts
+ {{- range .Values.dnsmasq.upstreamServers }}
+ {{ . }}
+ {{- end }}
+ {{- range .Values.dnsmasq.customDnsEntries }}
+ {{ . }}
+ {{- end }}
+ {{- if .Values.serviceDns.loadBalancerIP }}
+ dhcp-option=6,{{ .Values.serviceDns.loadBalancerIP }}
+ {{- end }}
+ {{- range .Values.dnsmasq.customSettings }}
+ {{ . }}
+ {{- end }}
+ addn-hosts: |
+ {{- range .Values.dnsmasq.additionalHostsEntries }}
+ {{ . }}
+ {{- end }}
+ 05-pihole-custom-cname.conf: |
+ {{- range .Values.dnsmasq.customCnameEntries }}
+ {{ . }}
+ {{- end }}
diff --git a/charts/pihole/templates/deployment.yaml b/charts/pihole/templates/deployment.yaml
new file mode 100644
index 0000000..082f767
--- /dev/null
+++ b/charts/pihole/templates/deployment.yaml
@@ -0,0 +1,349 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "pihole.fullname" . }}
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ strategy:
+ type: {{ .Values.strategyType }}
+ {{- if eq .Values.strategyType "RollingUpdate" }}
+ rollingUpdate:
+ maxSurge: {{ .Values.maxSurge }}
+ maxUnavailable: {{ .Values.maxUnavailable }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ annotations:
+ checksum.config.adlists: {{ include (print $.Template.BasePath "/configmap-adlists.yaml") . | sha256sum | trunc 63 }}
+ checksum.config.blacklist: {{ include (print $.Template.BasePath "/configmap-blacklist.yaml") . | sha256sum | trunc 63 }}
+ checksum.config.regex: {{ include (print $.Template.BasePath "/configmap-regex.yaml") . | sha256sum | trunc 63 }}
+ checksum.config.whitelist: {{ include (print $.Template.BasePath "/configmap-whitelist.yaml") . | sha256sum | trunc 63 }}
+ checksum.config.dnsmasqConfig: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
+ checksum.config.staticDhcpConfig: {{ include (print $.Template.BasePath "/configmap-static-dhcp.yaml") . | sha256sum | trunc 63 }}
+{{- with .Values.podAnnotations }}
+{{ toYaml . | indent 8 }}
+{{- end }}
+ labels:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ {{- if .Values.antiaff.enabled }}
+ affinity:
+ podAntiAffinity:
+ {{- if .Values.antiaff.strict }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ {{- else }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ {{- end }}
+ matchExpressions:
+ - key: release
+ operator: In
+ values:
+ - {{ .Values.antiaff.avoidRelease }}
+ {{- if .Values.antiaff.namespaces}}
+ namespaces:
+ {{- toYaml .Values.antiaff.namespaces | nindent 14 }}
+ {{- end }}
+ topologyKey: "kubernetes.io/hostname"
+ {{- end }}
+ {{- if .Values.podDnsConfig.enabled }}
+ dnsPolicy: {{ .Values.podDnsConfig.policy }}
+ dnsConfig:
+ nameservers:
+ {{- toYaml .Values.podDnsConfig.nameservers | nindent 8 }}
+ {{- end }}
+ hostname: {{ .Values.hostname }}
+ hostNetwork: {{ .Values.hostNetwork }}
+ {{- with .Values.extraInitContainers }}
+ initContainers:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ {{- if .Values.extraContainers }}
+ {{- toYaml .Values.extraContainers | nindent 8 }}
+ {{- end }}
+ {{- if .Values.monitoring.sidecar.enabled }}
+ - name: exporter
+ image: "{{ .Values.monitoring.sidecar.image.repository }}:{{ .Values.monitoring.sidecar.image.tag }}"
+ imagePullPolicy: {{ .Values.monitoring.sidecar.image.pullPolicy }}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ env:
+ - name: PIHOLE_HOSTNAME
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: PIHOLE_PORT
+ value: "{{ .Values.webHttp }}"
+ - name: PIHOLE_PASSWORD
+ {{- if .Values.admin.enabled }}
+ valueFrom:
+ secretKeyRef:
+ key: {{ .Values.admin.passwordKey | default "password" }}
+ name: {{ .Values.admin.existingSecret | default (include "pihole.password-secret" .) }}
+ {{- else }}
+ value: ""
+ {{- end }}
+ resources:
+{{ toYaml .Values.monitoring.sidecar.resources | indent 12 }}
+ ports:
+ - containerPort: {{ .Values.monitoring.sidecar.port }}
+ name: prometheus
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.doh.enabled }}
+ - name: cloudflared
+ image: "{{ .Values.doh.repository }}:{{ .Values.doh.tag }}"
+ imagePullPolicy: {{ .Values.doh.pullPolicy }}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ resources:
+ limits:
+ memory: 128Mi
+ ports:
+ - containerPort: 5053
+ name: cloudflared-udp
+ protocol: UDP
+ - containerPort: 49312
+ name: cloudflared-met
+ protocol: TCP
+ {{- if .Values.doh.envVars }}
+ env:
+ {{- range $key, $value := .Values.doh.envVars }}
+ - name: {{ $key | quote }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.doh.probes.liveness.enabled }}
+ livenessProbe:
+{{ toYaml .Values.doh.probes.liveness.probe | indent 12 }}
+ initialDelaySeconds: {{ .Values.doh.probes.liveness.initialDelaySeconds }}
+ failureThreshold: {{ .Values.doh.probes.liveness.failureThreshold }}
+ timeoutSeconds: {{ .Values.doh.probes.liveness.timeoutSeconds }}
+ {{- end }}
+ {{- end }}
+ - name: {{ .Chart.Name }}
+ env:
+ - name: 'WEB_PORT'
+ value: "{{ .Values.webHttp }}"
+ - name: VIRTUAL_HOST
+ value: {{ .Values.virtualHost }}
+ - name: WEBPASSWORD
+ {{- if .Values.admin.enabled }}
+ valueFrom:
+ secretKeyRef:
+ key: {{ .Values.admin.passwordKey | default "password" }}
+ name: {{ .Values.admin.existingSecret | default (include "pihole.password-secret" .) }}
+ {{- else }}
+ value: ""
+ {{- end }}
+ {{- range $key, $value := .Values.extraEnvVars }}
+ - name: {{ $key | quote }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- range $key, $value := .Values.extraEnvVarsSecret }}
+ - name: {{ $key | quote }}
+ valueFrom:
+ secretKeyRef:
+ key: {{ $value.key | quote }}
+ name: {{ $value.name | quote }}
+ {{- end }}
+ {{- if .Values.doh.enabled }}
+ - name: 'DNS1'
+ value: "127.0.0.1#5053"
+ - name: DNS2
+ value: "127.0.0.1#5053"
+ {{- else }}
+ {{- if .Values.DNS1 }}
+ - name: 'PIHOLE_DNS_'
+ value: {{ if .Values.DNS2 }}{{ ( printf "%v;%v" .Values.DNS1 .Values.DNS2 ) | squote }}{{ else }}{{ .Values.DNS1 | squote }}{{ end }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.ftl }}
+ - name: 'FTLCONF_{{ $key }}'
+ value: {{ $value | quote }}
+ {{- end }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: {{ .Values.privileged }}
+ {{- if .Values.capabilities }}
+ capabilities:
+ {{- toYaml .Values.capabilities | nindent 14 }}
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.webHttp }}
+ name: http
+ protocol: TCP
+ - containerPort: 53
+ name: dns
+ protocol: TCP
+ {{- if .Values.dnsHostPort.enabled }}
+ hostPort: {{ .Values.dnsHostPort.port }}
+ {{- end }}
+ - containerPort: 53
+ name: dns-udp
+ protocol: UDP
+ {{- if .Values.dnsHostPort.enabled }}
+ hostPort: {{ .Values.dnsHostPort.port }}
+ {{- end }}
+ - containerPort: {{ .Values.webHttps }}
+ name: https
+ protocol: TCP
+ - containerPort: 67
+ name: client-udp
+ protocol: UDP
+ {{- if .Values.probes.liveness.enabled }}
+ livenessProbe:
+ {{- if eq .Values.probes.liveness.type "command" }}
+ exec:
+ command: {{ .Values.probes.liveness.command | required "An array of command(s) is required if 'type' is set to 'command'." | toYaml | nindent 16 }}
+ {{- else }}
+ httpGet:
+ path: /admin/index.php
+ port: {{ .Values.probes.liveness.port }}
+ scheme: {{ .Values.probes.liveness.scheme }}
+ {{- end }}
+ initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
+ failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
+ timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
+
+ {{- end }}
+ {{- if .Values.probes.readiness.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /admin/index.php
+ port: {{ .Values.probes.readiness.port }}
+ scheme: {{ .Values.probes.readiness.scheme }}
+ initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
+ failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
+ timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: /etc/pihole
+ name: config
+ {{- if .Values.persistentVolumeClaim.subPath }}
+ subPath: {{ .Values.persistentVolumeClaim.subPath }}
+ {{- end }}
+ - mountPath: /etc/dnsmasq.d/02-custom.conf
+ name: custom-dnsmasq
+ subPath: 02-custom.conf
+ - mountPath: /etc/addn-hosts
+ name: custom-dnsmasq
+ subPath: addn-hosts
+ {{- if .Values.dnsmasq.customCnameEntries }}
+ - mountPath: /etc/dnsmasq.d/05-pihole-custom-cname.conf
+ name: custom-dnsmasq
+ subPath: 05-pihole-custom-cname.conf
+ {{- end }}
+ {{- if .Values.adlists }}
+ - mountPath: /etc/pihole/adlists.list
+ name: adlists
+ subPath: adlists.list
+ {{- end }}
+ {{- if .Values.blacklist }}
+ - mountPath: /etc/pihole/blacklist.txt
+ name: blacklist
+ subPath: blacklist.txt
+ {{- end }}
+ {{- if .Values.regex }}
+ - mountPath: /etc/pihole/regex.list
+ name: regex
+ subPath: regex.list
+ {{- end }}
+ {{- if .Values.whitelist }}
+ - mountPath: /etc/pihole/whitelist.txt
+ name: whitelist
+ subPath: whitelist.txt
+ {{- end }}
+ {{- if .Values.dnsmasq.staticDhcpEntries }}
+ - mountPath: /etc/dnsmasq.d/04-pihole-static-dhcp.conf
+ name: static-dhcp
+ subPath: pihole-static-dhcp.conf
+ {{- end }}
+ {{- range $key, $value := .Values.extraVolumeMounts }}
+ - name: {{ $key }}
+{{- toYaml $value | nindent 12 }}
+ {{- end }}
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: "{{ .Values.priorityClassName }}"
+ {{- end }}
+ {{- with .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ volumes:
+ - name: config
+ {{- if .Values.persistentVolumeClaim.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ if .Values.persistentVolumeClaim.existingClaim }}{{ .Values.persistentVolumeClaim.existingClaim }}{{- else }}{{ template "pihole.fullname" . }}{{- end }}
+ {{- else if .Values.customVolumes.enabled }}
+{{- toYaml .Values.customVolumes.config | nindent 8 }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - configMap:
+ defaultMode: 420
+ name: {{ template "pihole.fullname" . }}-custom-dnsmasq
+ name: custom-dnsmasq
+ {{- if .Values.adlists }}
+ - configMap:
+ defaultMode: 420
+ name: {{ template "pihole.fullname" . }}-adlists
+ name: adlists
+ {{- end }}
+ {{- if .Values.whitelist }}
+ - configMap:
+ defaultMode: 420
+ name: {{ template "pihole.fullname" . }}-whitelist
+ name: whitelist
+ {{- end }}
+ {{- if .Values.dnsmasq.staticDhcpEntries }}
+ - configMap:
+ defaultMode: 420
+ name: {{ template "pihole.fullname" . }}-static-dhcp
+ name: static-dhcp
+ {{- end }}
+ {{- if .Values.blacklist }}
+ - configMap:
+ defaultMode: 420
+ name: {{ template "pihole.fullname" . }}-blacklist
+ name: blacklist
+ {{- end }}
+ {{- if .Values.regex }}
+ - configMap:
+ defaultMode: 420
+ name: {{ template "pihole.fullname" . }}-regex
+ name: regex
+ {{- end }}
+ {{- range $key, $value := .Values.extraVolumes }}
+ - name: {{ $key }}
+{{- toYaml $value | nindent 8 }}
+ {{- end }}
diff --git a/charts/pihole/templates/extra-manifests.yaml b/charts/pihole/templates/extra-manifests.yaml
new file mode 100644
index 0000000..a9bb3b6
--- /dev/null
+++ b/charts/pihole/templates/extra-manifests.yaml
@@ -0,0 +1,4 @@
+{{ range .Values.extraObjects }}
+---
+{{ tpl (toYaml .) $ }}
+{{ end }}
diff --git a/charts/pihole/templates/ingress.yaml b/charts/pihole/templates/ingress.yaml
new file mode 100644
index 0000000..fb5a081
--- /dev/null
+++ b/charts/pihole/templates/ingress.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.ingress.enabled -}}
+{{- $serviceName := printf "%s-%s" (include "pihole.fullname" .) "web" -}}
+{{- $ingressPath := .Values.ingress.path -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ template "pihole.fullname" . }}
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+{{- if .Values.ingress.ingressClassName }}
+ ingressClassName: {{ .Values.ingress.ingressClassName }}
+{{- end }}
+{{- if .Values.ingress.tls }}
+ tls:
+{{ toYaml .Values.ingress.tls | indent 4 }}
+{{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ . | quote }}
+ http:
+ paths:
+ - path: {{ $ingressPath }}
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: {{ $serviceName }}
+ port:
+ name: http
+ {{- end }}
+{{- end }}
diff --git a/charts/pihole/templates/pdb.yaml b/charts/pihole/templates/pdb.yaml
new file mode 100644
index 0000000..5103178
--- /dev/null
+++ b/charts/pihole/templates/pdb.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.podDisruptionBudget.enabled -}}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "pihole.fullname" . }}-pdb
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+{{- if .Values.podDisruptionBudget.minAvailable }}
+ minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
+{{- end }}
+{{- if .Values.podDisruptionBudget.maxUnavailable }}
+ maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
+{{- end }}
+ selector:
+ matchLabels:
+ app: {{ template "pihole.name" . }}
+{{- end }}
diff --git a/charts/pihole/templates/podmonitor.yaml b/charts/pihole/templates/podmonitor.yaml
new file mode 100644
index 0000000..bb3be7d
--- /dev/null
+++ b/charts/pihole/templates/podmonitor.yaml
@@ -0,0 +1,43 @@
+{{- if .Values.monitoring.podMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ {{- with .Values.monitoring.podMonitor.labels }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ name: {{ template "pihole.fullname" . }}-prometheus-exporter
+{{- if .Values.monitoring.podMonitor.namespace }}
+ namespace: {{ .Values.monitoring.podMonitor.namespace }}
+{{- end }}
+spec:
+ podMetricsEndpoints:
+ - port: prometheus
+ path: /metrics
+{{- if .Values.monitoring.podMonitor.interval }}
+ interval: {{ .Values.monitoring.podMonitor.interval }}
+{{- end }}
+{{- if .Values.monitoring.podMonitor.bearerTokenFile }}
+ bearerTokenFile: {{ .Values.monitoring.podMonitor.bearerTokenFile }}
+{{- end }}
+{{- if .Values.monitoring.podMonitor.bearerTokenSecret }}
+ bearerTokenSecret:
+ name: {{ .Values.monitoring.podMonitor.bearerTokenSecret.name }}
+ key: {{ .Values.monitoring.podMonitor.bearerTokenSecret.key }}
+ {{- if .Values.monitoring.podMonitor.bearerTokenSecret.optional }}
+ optional: {{ .Values.monitoring.podMonitor.bearerTokenSecret.optional }}
+ {{- end }}
+{{- end }}
+ jobLabel: {{ template "pihole.fullname" . }}-prometheus-exporter
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ selector:
+ matchLabels:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+{{- end }}
diff --git a/charts/pihole/templates/secret.yaml b/charts/pihole/templates/secret.yaml
new file mode 100644
index 0000000..e603cbb
--- /dev/null
+++ b/charts/pihole/templates/secret.yaml
@@ -0,0 +1,18 @@
+{{- if and .Values.admin.enabled (not .Values.admin.existingSecret) }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "pihole.password-secret" . }}
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+type: Opaque
+data:
+ {{- if .Values.adminPassword }}
+ password: {{ .Values.adminPassword | b64enc | quote }}
+ {{- else }}
+ password: {{ randAlphaNum 40 | b64enc | quote }}
+ {{- end }}
+{{- end }}
diff --git a/charts/pihole/templates/service-dhcp.yaml b/charts/pihole/templates/service-dhcp.yaml
new file mode 100644
index 0000000..3df2ad9
--- /dev/null
+++ b/charts/pihole/templates/service-dhcp.yaml
@@ -0,0 +1,75 @@
+{{- if .Values.serviceDhcp.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-dhcp
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDhcp.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDhcp.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDhcp.type }}
+ {{- if and (.Values.dualStack.enabled) (not (eq .Values.serviceDhcp.type "LoadBalancer")) }}
+ ipFamilies:
+ - IPv4
+ - IPv6
+ ipFamilyPolicy: PreferDualStack
+ {{- end }}
+ {{- if .Values.serviceDhcp.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.serviceDhcp.loadBalancerIP }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDhcp.type "NodePort") (eq .Values.serviceDhcp.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDhcp.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.serviceDhcp.port }}
+ targetPort: client-udp
+ {{- if and (.Values.serviceDhcp.nodePort) (eq .Values.serviceDhcp.type "NodePort") }}
+ nodePort: {{ .Values.serviceDhcp.nodePort }}
+ {{- end }}
+ protocol: UDP
+ name: client-udp
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+---
+{{- if and (.Values.dualStack.enabled) (eq .Values.serviceDhcp.type "LoadBalancer") -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "pihole.fullname" . }}-dhcp-ipv6
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDhcp.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDhcp.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDhcp.type }}
+ ipFamilies:
+ - IPv6
+ ipFamilyPolicy: SingleStack
+ {{- if .Values.serviceDhcp.loadBalancerIPv6 }}
+ loadBalancerIP: {{ .Values.serviceDhcp.loadBalancerIPv6 }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDhcp.type "NodePort") (eq .Values.serviceDhcp.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDhcp.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: 67
+ targetPort: client-udp
+ protocol: UDP
+ name: client-udp
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+{{- end }}
+{{- end }}
diff --git a/charts/pihole/templates/service-dns-tcp.yaml b/charts/pihole/templates/service-dns-tcp.yaml
new file mode 100644
index 0000000..9206260
--- /dev/null
+++ b/charts/pihole/templates/service-dns-tcp.yaml
@@ -0,0 +1,87 @@
+{{- if not .Values.serviceDns.mixedService }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-dns-tcp
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDns.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDns.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDns.type }}
+ {{- if and (.Values.dualStack.enabled) (not (eq .Values.serviceDns.type "LoadBalancer")) }}
+ ipFamilies:
+ - IPv4
+ - IPv6
+ ipFamilyPolicy: PreferDualStack
+ {{- end }}
+ {{- if .Values.serviceDns.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.serviceDns.loadBalancerIP }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDns.type "NodePort") (eq .Values.serviceDns.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDns.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns
+ {{- if and (.Values.serviceDns.nodePort) (eq .Values.serviceDns.type "NodePort") }}
+ nodePort: {{ .Values.serviceDns.nodePort }}
+ {{- end }}
+ protocol: TCP
+ name: dns
+ {{- if .Values.monitoring.sidecar.enabled }}
+ - port: {{ .Values.monitoring.sidecar.port }}
+ targetPort: prometheus
+ protocol: TCP
+ name: prometheus
+ {{- end }}
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+---
+{{- if and (.Values.dualStack.enabled) (eq .Values.serviceDns.type "LoadBalancer") -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-dns-tcp-ipv6
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDns.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDns.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDns.type }}
+ ipFamilies:
+ - IPv6
+ ipFamilyPolicy: SingleStack
+ {{- if .Values.serviceDns.loadBalancerIPv6 }}
+ loadBalancerIP: {{ .Values.serviceDns.loadBalancerIPv6 }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDns.type "NodePort") (eq .Values.serviceDns.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDns.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns
+ protocol: TCP
+ name: dns
+ {{- if .Values.monitoring.sidecar.enabled }}
+ - port: {{ .Values.monitoring.sidecar.port }}
+ targetPort: prometheus
+ protocol: TCP
+ name: prometheus
+ {{- end }}
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+{{- end }}
+{{- end }}
diff --git a/charts/pihole/templates/service-dns-udp.yaml b/charts/pihole/templates/service-dns-udp.yaml
new file mode 100644
index 0000000..34835d4
--- /dev/null
+++ b/charts/pihole/templates/service-dns-udp.yaml
@@ -0,0 +1,75 @@
+{{- if not .Values.serviceDns.mixedService }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-dns-udp
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDns.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDns.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDns.type }}
+ {{- if and (.Values.dualStack.enabled) (not (eq .Values.serviceDns.type "LoadBalancer")) }}
+ ipFamilies:
+ - IPv4
+ - IPv6
+ ipFamilyPolicy: PreferDualStack
+ {{- end }}
+ {{- if .Values.serviceDns.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.serviceDns.loadBalancerIP }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDns.type "NodePort") (eq .Values.serviceDns.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDns.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns-udp
+ {{- if and (.Values.serviceDns.nodePort) (eq .Values.serviceDns.type "NodePort") }}
+ nodePort: {{ .Values.serviceDns.nodePort }}
+ {{- end }}
+ protocol: UDP
+ name: dns-udp
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+---
+{{- if and (.Values.dualStack.enabled) (eq .Values.serviceDns.type "LoadBalancer") -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-dns-udp-ipv6
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDns.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDns.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDns.type }}
+ ipFamilies:
+ - IPv6
+ ipFamilyPolicy: SingleStack
+ {{- if .Values.serviceDns.loadBalancerIPv6 }}
+ loadBalancerIP: {{ .Values.serviceDns.loadBalancerIPv6 }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDns.type "NodePort") (eq .Values.serviceDns.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDns.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns-udp
+ protocol: UDP
+ name: dns-udp
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+{{- end }}
+{{- end }}
diff --git a/charts/pihole/templates/service-dns.yaml b/charts/pihole/templates/service-dns.yaml
new file mode 100644
index 0000000..0772138
--- /dev/null
+++ b/charts/pihole/templates/service-dns.yaml
@@ -0,0 +1,92 @@
+{{- if .Values.serviceDns.mixedService }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-dns
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDns.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDns.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDns.type }}
+ {{- if .Values.serviceDns.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.serviceDns.loadBalancerIP }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDns.type "NodePort") (eq .Values.serviceDns.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDns.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns
+    {{- if and (.Values.serviceDns.nodePort) (eq .Values.serviceDns.type "NodePort") }}
+ nodePort: {{ .Values.serviceDns.nodePort }}
+ {{- end }}
+ protocol: TCP
+ name: dns
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns-udp
+ {{- if and (.Values.serviceDns.nodePort) (eq .Values.serviceDns.type "NodePort") }}
+ nodePort: {{ .Values.serviceDns.nodePort }}
+ {{- end }}
+ protocol: UDP
+ name: dns-udp
+ {{- if .Values.monitoring.sidecar.enabled }}
+ - port: {{ .Values.monitoring.sidecar.port }}
+ targetPort: prometheus
+ protocol: TCP
+ name: prometheus
+ {{- end }}
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+---
+{{- if and (.Values.dualStack.enabled) (eq .Values.serviceDns.type "LoadBalancer") -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-dns-ipv6
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceDns.annotations }}
+ annotations:
+{{ toYaml .Values.serviceDns.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceDns.type }}
+ ipFamilies:
+ - IPv6
+ ipFamilyPolicy: SingleStack
+ {{- if .Values.serviceDns.loadBalancerIPv6 }}
+ loadBalancerIP: {{ .Values.serviceDns.loadBalancerIPv6 }}
+ {{- end }}
+ {{- if or (eq .Values.serviceDns.type "NodePort") (eq .Values.serviceDns.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceDns.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns
+ protocol: TCP
+ name: dns
+ - port: {{ .Values.serviceDns.port }}
+ targetPort: dns-udp
+ protocol: UDP
+ name: dns-udp
+ {{- if .Values.monitoring.sidecar.enabled }}
+ - port: {{ .Values.monitoring.sidecar.port }}
+ targetPort: prometheus
+ protocol: TCP
+ name: prometheus
+ {{- end }}
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+{{- end }}
+{{- end }}
diff --git a/charts/pihole/templates/service-web.yaml b/charts/pihole/templates/service-web.yaml
new file mode 100644
index 0000000..ace4603
--- /dev/null
+++ b/charts/pihole/templates/service-web.yaml
@@ -0,0 +1,102 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-web
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceWeb.annotations }}
+ annotations:
+{{ toYaml .Values.serviceWeb.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceWeb.type }}
+ {{- if and (.Values.dualStack.enabled) (not (eq .Values.serviceWeb.type "LoadBalancer")) }}
+ ipFamilies:
+ - IPv4
+ - IPv6
+ ipFamilyPolicy: PreferDualStack
+ {{- end }}
+ {{- if .Values.serviceWeb.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.serviceWeb.loadBalancerIP }}
+ {{- end }}
+ {{- if or (eq .Values.serviceWeb.type "NodePort") (eq .Values.serviceWeb.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceWeb.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ {{- if .Values.serviceWeb.http.enabled }}
+ - port: {{ .Values.serviceWeb.http.port }}
+ targetPort: http
+ {{- if and (.Values.serviceWeb.http.nodePort) (eq .Values.serviceWeb.type "NodePort") }}
+ nodePort: {{ .Values.serviceWeb.http.nodePort }}
+ {{- end }}
+ protocol: TCP
+ name: http
+ {{- end }}
+ {{- if .Values.serviceWeb.https.enabled }}
+ - port: {{ .Values.serviceWeb.https.port }}
+ targetPort: https
+ {{- if and (.Values.serviceWeb.https.nodePort) (eq .Values.serviceWeb.type "NodePort") }}
+ nodePort: {{ .Values.serviceWeb.https.nodePort }}
+ {{- end }}
+ protocol: TCP
+ name: https
+ {{- end }}
+ {{- if .Values.doh.enabled }}
+ - port: 49312
+ protocol: TCP
+ name: cloudflared-met
+ {{- end }}
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+---
+{{- if and (.Values.dualStack.enabled) (eq .Values.serviceWeb.type "LoadBalancer") -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "pihole.fullname" . }}-web-ipv6
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ template "pihole.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.serviceWeb.annotations }}
+ annotations:
+{{ toYaml .Values.serviceWeb.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.serviceWeb.type }}
+ ipFamilies:
+ - IPv6
+ ipFamilyPolicy: SingleStack
+ {{- if .Values.serviceWeb.loadBalancerIPv6 }}
+ loadBalancerIP: {{ .Values.serviceWeb.loadBalancerIPv6 }}
+ {{- end }}
+ {{- if or (eq .Values.serviceWeb.type "NodePort") (eq .Values.serviceWeb.type "LoadBalancer") }}
+ externalTrafficPolicy: {{ .Values.serviceWeb.externalTrafficPolicy }}
+ {{- end }}
+ ports:
+ {{- if .Values.serviceWeb.http.enabled }}
+ - port: {{ .Values.serviceWeb.http.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ {{- end }}
+ {{- if .Values.serviceWeb.https.enabled }}
+ - port: {{ .Values.serviceWeb.https.port }}
+ targetPort: https
+ protocol: TCP
+ name: https
+ {{- end }}
+ {{- if .Values.doh.enabled }}
+ - port: 49312
+ protocol: TCP
+ name: cloudflared-met
+ {{- end }}
+ selector:
+ app: {{ template "pihole.name" . }}
+ release: {{ .Release.Name }}
+{{- end }}
diff --git a/charts/pihole/templates/tests/test-pihole-endpoint.yml b/charts/pihole/templates/tests/test-pihole-endpoint.yml
new file mode 100644
index 0000000..dd50c04
--- /dev/null
+++ b/charts/pihole/templates/tests/test-pihole-endpoint.yml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ .Release.Name }}-smoke-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: hook1-container
+ image: curlimages/curl
+ imagePullPolicy: IfNotPresent
+ command: ['sh', '-c', 'curl http://{{ template "pihole.fullname" . }}-web:80/']
+ restartPolicy: Never
+ terminationGracePeriodSeconds: 0
diff --git a/charts/pihole/templates/volume-claim.yaml b/charts/pihole/templates/volume-claim.yaml
new file mode 100644
index 0000000..3b140be
--- /dev/null
+++ b/charts/pihole/templates/volume-claim.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.persistentVolumeClaim.enabled -}}
+{{- if not .Values.persistentVolumeClaim.existingClaim -}}
+apiVersion: "v1"
+kind: "PersistentVolumeClaim"
+metadata:
+{{- if .Values.persistentVolumeClaim.annotations }}
+ annotations:
+{{ toYaml .Values.persistentVolumeClaim.annotations | indent 4 }}
+{{- end }}
+ labels:
+ app: {{ template "pihole.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ component: "{{ .Values.persistentVolumeClaim.name }}"
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ name: {{ template "pihole.fullname" . }}
+spec:
+ accessModes:
+{{ toYaml .Values.persistentVolumeClaim.accessModes | indent 4 }}
+{{- if .Values.persistentVolumeClaim.storageClass }}
+{{- if (eq "-" .Values.persistentVolumeClaim.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistentVolumeClaim.storageClass }}"
+{{- end }}
+{{- end }}
+ resources:
+ requests:
+ storage: "{{ .Values.persistentVolumeClaim.size }}"
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/pihole/values.yaml b/charts/pihole/values.yaml
new file mode 100644
index 0000000..6a6d32e
--- /dev/null
+++ b/charts/pihole/values.yaml
@@ -0,0 +1,538 @@
+# Default values for pihole.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# -- The number of replicas
+replicaCount: 1
+
+# -- The `spec.strategyType` for updates
+strategyType: RollingUpdate
+
+# -- The maximum number of Pods that can be created over the desired number of `ReplicaSet` during updating.
+maxSurge: 1
+
+# -- The maximum number of Pods that can be unavailable during updating
+maxUnavailable: 1
+
+image:
+  # -- the repository to pull the image from
+ repository: "pihole/pihole"
+ # -- the docker tag, if left empty it will get it from the chart's appVersion
+ tag: ""
+ # -- the pull policy
+ pullPolicy: IfNotPresent
+
+dualStack:
+ # -- set this to true to enable creation of DualStack services or creation of separate IPv6 services if `serviceDns.type` is set to `"LoadBalancer"`
+ enabled: false
+
+dnsHostPort:
+ # -- set this to true to enable dnsHostPort
+ enabled: false
+ # -- default port for this pod
+ port: 53
+
+# -- Configuration for the DNS service on port 53
+serviceDns:
+ # -- deploys a mixed (TCP + UDP) Service instead of separate ones
+ mixedService: false
+
+ # -- `spec.type` for the DNS Service
+ type: NodePort
+
+ # -- The port of the DNS service
+ port: 53
+
+ # -- Optional node port for the DNS service
+ nodePort: ""
+
+  # -- `spec.externalTrafficPolicy` for the DNS Service
+ externalTrafficPolicy: Local
+
+ # -- A fixed `spec.loadBalancerIP` for the DNS Service
+ loadBalancerIP: ""
+ # -- A fixed `spec.loadBalancerIP` for the IPv6 DNS Service
+ loadBalancerIPv6: ""
+
+ # -- Annotations for the DNS service
+ annotations:
+ {}
+ # metallb.universe.tf/address-pool: network-services
+ # metallb.universe.tf/allow-shared-ip: pihole-svc
+
+# -- Configuration for the DHCP service on port 67
+serviceDhcp:
+ # -- Generate a Service resource for DHCP traffic
+ enabled: true
+
+ # -- `spec.type` for the DHCP Service
+ type: NodePort
+
+ # -- The port of the DHCP service
+ port: 67
+
+ # -- Optional node port for the DHCP service
+ nodePort: ""
+
+ # -- `spec.externalTrafficPolicy` for the DHCP Service
+ externalTrafficPolicy: Local
+
+ # -- A fixed `spec.loadBalancerIP` for the DHCP Service
+ loadBalancerIP: ""
+ # -- A fixed `spec.loadBalancerIP` for the IPv6 DHCP Service
+ loadBalancerIPv6: ""
+
+ # -- Annotations for the DHCP service
+ annotations:
+ {}
+ # metallb.universe.tf/address-pool: network-services
+ # metallb.universe.tf/allow-shared-ip: pihole-svc
+
+# -- Configuration for the web interface service
+serviceWeb:
+ # -- Configuration for the HTTP web interface listener
+ http:
+ # -- Generate a service for HTTP traffic
+ enabled: true
+
+ # -- The port of the web HTTP service
+ port: 80
+
+ # -- Optional node port for the web HTTP service
+ nodePort: ""
+
+ # -- Configuration for the HTTPS web interface listener
+ https:
+ # -- Generate a service for HTTPS traffic
+ enabled: true
+
+ # -- The port of the web HTTPS service
+ port: 443
+
+ # -- Optional node port for the web HTTPS service
+ nodePort: ""
+
+ # -- `spec.type` for the web interface Service
+ type: ClusterIP
+
+ # -- `spec.externalTrafficPolicy` for the web interface Service
+ externalTrafficPolicy: Local
+
+ # -- A fixed `spec.loadBalancerIP` for the web interface Service
+ loadBalancerIP: ""
+ # -- A fixed `spec.loadBalancerIP` for the IPv6 web interface Service
+ loadBalancerIPv6: ""
+
+  # -- Annotations for the web interface service
+ annotations:
+ {}
+ # metallb.universe.tf/address-pool: network-services
+ # metallb.universe.tf/allow-shared-ip: pihole-svc
+
+virtualHost: pi.hole
+
+# -- Configuration for the Ingress
+ingress:
+ # -- Generate a Ingress resource
+ enabled: false
+
+ # -- Specify an ingressClassName
+ # ingressClassName: nginx
+
+ # -- Annotations for the ingress
+ annotations:
+ {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ path: /
+ hosts:
+ # virtualHost (default value is pi.hole) will be appended to the hosts
+ - chart-example.local
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # #- virtualHost (default value is pi.hole) will be appended to the hosts
+ # - chart-example.local
+
+# -- Probes configuration
+probes:
+ # -- probes.liveness -- Configure the healthcheck for the ingress controller
+ liveness:
+ # -- Generate a liveness probe
+ # 'type' defaults to httpGet, can be set to 'command' to use a command type liveness probe.
+ type: httpGet
+ # command:
+ # - /bin/bash
+ # - -c
+ # - /bin/true
+ enabled: true
+ initialDelaySeconds: 60
+ failureThreshold: 10
+ timeoutSeconds: 5
+ port: http
+ scheme: HTTP
+ readiness:
+ # -- Generate a readiness probe
+ enabled: true
+ initialDelaySeconds: 60
+ failureThreshold: 3
+ timeoutSeconds: 5
+ port: http
+ scheme: HTTP
+
+# -- We usually recommend not to specify default resources and to leave this as a conscious
+# -- choice for the user. This also increases chances charts run on environments with little
+# -- resources, such as Minikube. If you do want to specify resources, uncomment the following
+# -- lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+resources:
+ {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# -- `spec.PersistentVolumeClaim` configuration
+persistentVolumeClaim:
+ # -- set to true to use pvc
+ enabled: false
+
+ # -- specify an existing `PersistentVolumeClaim` to use
+ # existingClaim: ""
+
+  # -- Annotations for the `PersistentVolumeClaim`
+ annotations: {}
+
+ accessModes:
+ - ReadWriteOnce
+
+ size: "500Mi"
+
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+
+ ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
+ ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
+
+ ## subPath: "pihole"
+
+nodeSelector: {}
+
+tolerations: []
+
+# -- Specify a priorityClassName
+# priorityClassName: ""
+
+# Reference: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+topologySpreadConstraints: []
+# - maxSkew: <integer>
+# topologyKey: <string>
+# whenUnsatisfiable: <string>
+# labelSelector: <object>
+
+affinity: {}
+
+# -- Administrator password when not using an existing secret (see below)
+adminPassword: "admin"
+
+# -- Use an existing secret for the admin password.
+admin:
+ # -- If set to false admin password will be disabled, adminPassword specified above and the pre-existing secret (if specified) will be ignored.
+ enabled: true
+ # -- Specify an existing secret to use as admin password
+ existingSecret: ""
+ # -- Specify the key inside the secret to use
+ passwordKey: "password"
+
+# -- extraEnvironmentVars is a list of extra environment variables to set for pihole to use
+extraEnvVars:
+ {}
+ # TZ: UTC
+
+# -- extraEnvVarsSecret is a list of secrets to load in as environment variables.
+extraEnvVarsSecret:
+ {}
+ # env_var:
+ # name: secret-name
+ # key: secret-key
+
+# -- default upstream DNS 1 server to use
+DNS1: "8.8.8.8"
+# -- default upstream DNS 2 server to use
+DNS2: "8.8.4.4"
+
+antiaff:
+ # -- set to true to enable antiaffinity (example: 2 pihole DNS in the same cluster)
+ enabled: false
+ # -- Here you can set the pihole release (you set in `helm install <releasename> ...`)
+ # you want to avoid
+ avoidRelease: pihole1
+ # -- Here you can choose between preferred or required
+ strict: true
+  # -- Here you can pass namespaces to be part of those included in anti-affinity
+ namespaces: []
+
+doh:
+ # -- set to true to enabled DNS over HTTPs via cloudflared
+ enabled: false
+ name: "cloudflared"
+ repository: "crazymax/cloudflared"
+ tag: latest
+ pullPolicy: IfNotPresent
+ # -- Here you can pass environment variables to the DoH container, for example:
+ envVars:
+ {}
+ # TUNNEL_DNS_UPSTREAM: "https://1.1.1.2/dns-query,https://1.0.0.2/dns-query"
+
+ # -- Probes configuration
+ probes:
+ # -- Configure the healthcheck for the doh container
+ liveness:
+ # -- set to true to enable liveness probe
+ enabled: true
+ # -- customize the liveness probe
+ probe:
+ exec:
+ command:
+ - nslookup
+ - -po=5053
+ - cloudflare.com
+ - "127.0.0.1"
+ # -- defines the initial delay for the liveness probe
+ initialDelaySeconds: 60
+ # -- defines the failure threshold for the liveness probe
+ failureThreshold: 10
+      # -- defines the timeout in seconds for the liveness probe
+ timeoutSeconds: 5
+
+dnsmasq:
+ # -- Add upstream dns servers. All lines will be added to the pihole dnsmasq configuration
+ upstreamServers: []
+ # - server=/foo.bar/192.168.178.10
+ # - server=/bar.foo/192.168.178.11
+
+ # -- Add custom dns entries to override the dns resolution. All lines will be added to the pihole dnsmasq configuration.
+ customDnsEntries: []
+ # - address=/foo.bar/192.168.178.10
+ # - address=/bar.foo/192.168.178.11
+
+ # -- Dnsmasq reads the /etc/hosts file to resolve ips. You can add additional entries if you like
+ additionalHostsEntries: []
+ # - 192.168.0.3 host4
+ # - 192.168.0.4 host5
+
+ # -- Static DHCP config
+ staticDhcpEntries: []
+ # staticDhcpEntries:
+ # - dhcp-host=MAC_ADDRESS,IP_ADDRESS,HOSTNAME
+
+ # -- Other options
+ customSettings:
+ # otherSettings:
+ # - rebind-domain-ok=/plex.direct/
+
+ # -- Here we specify custom cname entries that should point to `A` records or
+ # elements in customDnsEntries array.
+ # The format should be:
+ # - cname=cname.foo.bar,foo.bar
+ # - cname=cname.bar.foo,bar.foo
+ # - cname=cname record,dns record
+ customCnameEntries: []
+ # Here we specify custom cname entries that should point to `A` records or
+ # elements in customDnsEntries array.
+ # The format should be:
+ # - cname=cname.foo.bar,foo.bar
+ # - cname=cname.bar.foo,bar.foo
+ # - cname=cname record,dns record
+
+# -- list of adlists to import during initial start of the container
+adlists:
+ {}
+ # If you want to provide blocklists, add them here.
+ # - https://hosts-file.net/grm.txt
+ # - https://reddestdream.github.io/Projects/MinimalHosts/etc/MinimalHostsBlocker/minimalhosts
+
+# -- list of whitelisted domains to import during initial start of the container
+whitelist:
+ {}
+ # If you want to provide whitelisted domains, add them here.
+ # - clients4.google.com
+
+# -- list of blacklisted domains to import during initial start of the container
+blacklist:
+ {}
+ # If you want to have special domains blacklisted, add them here
+ # - *.blackist.com
+
+# -- list of blacklisted regex expressions to import during initial start of the container
+regex:
+ {}
+ # Add regular expression blacklist items
+ # - (^|\.)facebook\.com$
+
+# -- values that should be added to pihole-FTL.conf
+ftl:
+ {}
+ # Add values for pihole-FTL.conf
+ # MAXDBDAYS: 14
+
+# -- port the container should use to expose HTTP traffic
+webHttp: "80"
+
+# -- port the container should use to expose HTTPS traffic
+webHttps: "443"
+
+# -- hostname of pod
+hostname: ""
+
+# -- should the container use host network
+hostNetwork: "false"
+
+# -- should container run in privileged mode
+privileged: "false"
+
+# linux capabilities container should run with
+capabilities:
+ {}
+ # add:
+ # - NET_ADMIN
+
+customVolumes:
+ # -- set this to true to enable custom volumes
+ enabled: false
+ # -- any volume type can be used here
+ config:
+ {}
+ # hostPath:
+ # path: "/mnt/data"
+
+# -- any extra volumes you might want
+extraVolumes:
+ {}
+ # external-conf:
+ # configMap:
+ # name: pi-hole-lighttpd-external-conf
+
+# -- any extra volume mounts you might want
+extraVolumeMounts:
+ {}
+ # external-conf:
+ # mountPath: /etc/lighttpd/external.conf
+ # subPath: external.conf
+
+extraContainers:
+ []
+ # - name: pihole-logwatcher
+ # image: your-registry/pihole-logwatcher
+ # imagePullPolicy: Always
+ # resources:
+ # requests:
+ # cpu: 100m
+ # memory: 5Mi
+ # limits:
+ # cpu: 100m
+ # memory: 5Mi
+ # volumeMounts:
+ # - name: pihole-logs
+ # mountPath: /var/log/pihole
+
+# -- any extra kubernetes manifests you might want
+extraObjects:
+ []
+ # - apiVersion: v1
+ # kind: ConfigMap
+ # metadata:
+ # name: pi-hole-lighttpd-external-conf
+ # data:
+ # external.conf: |
+ # $HTTP["host"] =~ "example.foo" {
+ # # If we're using a non-standard host for pihole, ensure the Pi-hole
+ # # Block Page knows that this is not a blocked domain
+ # setenv.add-environment = ("fqdn" => "true")
+ #
+ # # Enable the SSL engine with a cert, only for this specific host
+ # $SERVER["socket"] == ":443" {
+ # ssl.engine = "enable"
+ # ssl.pemfile = "/etc/ssl/lighttpd-private/tls.crt"
+ # ssl.privkey = "/etc/ssl/lighttpd-private/tls.key"
+ # ssl.ca-file = "/etc/ssl/lighttpd-private/ca.crt"
+ # ssl.honor-cipher-order = "enable"
+ # ssl.cipher-list = "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH"
+ # ssl.use-sslv2 = "disable"
+ # ssl.use-sslv3 = "disable"
+ # }
+ # }
+ #
+ # # Redirect HTTP to HTTPS
+ # $HTTP["scheme"] == "http" {
+ # $HTTP["host"] =~ ".*" {
+ # url.redirect = (".*" => "https://%0$0")
+ # }
+ # }
+
+# -- Additional annotations for pods
+podAnnotations:
+ {}
+ # Example below allows Prometheus to scape on metric port (requires pihole-exporter sidecar enabled)
+ # prometheus.io/port: '9617'
+ # prometheus.io/scrape: 'true'
+
+# -- any initContainers you might want to run before starting pihole
+extraInitContainers:
+ []
+ # - name: copy-config
+ # image: busybox
+ # args:
+ # - sh
+ # - -c
+ # - |
+ # cp /etc/lighttpd-cm/external.conf /etc/lighttpd/
+ # ls -l /etc/lighttpd/
+ # volumeMounts:
+ # - name: external-conf-cm
+ # mountPath: /etc/lighttpd-cm/
+ # - name: external-conf
+ # mountPath: /etc/lighttpd/
+
+monitoring:
+ # -- Preferably adding prometheus scrape annotations rather than enabling podMonitor.
+ podMonitor:
+ # -- set this to true to enable podMonitor
+ enabled: false
+ # -- Sidecar configuration
+ sidecar:
+ # -- set this to true to enable podMonitor as sidecar
+ enabled: false
+ port: 9617
+ image:
+ repository: ekofr/pihole-exporter
+ tag: v0.3.0
+ pullPolicy: IfNotPresent
+ resources:
+ limits:
+ memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+podDnsConfig:
+ enabled: true
+ policy: "None"
+ nameservers:
+ - 127.0.0.1
+ - 8.8.8.8
+
+# -- configure a Pod Disruption Budget
+podDisruptionBudget:
+ # -- set to true to enable creating the PDB
+ enabled: false
+ # -- minimum number of pods Kubernetes should try to have running at all times
+ minAvailable: 1
+ # -- maximum number of pods Kubernetes will allow to be unavailable. Cannot set both `minAvailable` and `maxAvailable`
+ # maxUnavailable: 1
diff --git a/charts/port-allocator/.helmignore b/charts/port-allocator/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/port-allocator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/port-allocator/Chart.yaml b/charts/port-allocator/Chart.yaml
new file mode 100644
index 0000000..faeb616
--- /dev/null
+++ b/charts/port-allocator/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: port-allocator
+description: A Helm chart for PCloud port-allocator
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/port-allocator/templates/install.yaml b/charts/port-allocator/templates/install.yaml
new file mode 100644
index 0000000..ec7a144
--- /dev/null
+++ b/charts/port-allocator/templates/install.yaml
@@ -0,0 +1,60 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ssh-key
+type: Opaque
+data:
+ private: {{ .Values.sshPrivateKey }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: port-allocator
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: port-allocator
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: port-allocator
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: port-allocator
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: port-allocator
+ spec:
+ volumes:
+ - name: ssh-key
+ secret:
+ secretName: ssh-key
+ containers:
+ - name: port-allocator
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - server
+ - --port=8080
+ - --repo-addr={{ .Values.repoAddr }}
+ - --ssh-key=/pcloud/ssh-key/private
+ - --ingress-nginx-path={{ .Values.ingressNginxPath }}
+ volumeMounts:
+ - name: ssh-key
+ readOnly: true
+ mountPath: /pcloud/ssh-key
diff --git a/charts/port-allocator/values.yaml b/charts/port-allocator/values.yaml
new file mode 100644
index 0000000..4cdcf67
--- /dev/null
+++ b/charts/port-allocator/values.yaml
@@ -0,0 +1,7 @@
+image:
+ repository: giolekva/port-allocator
+ tag: latest
+ pullPolicy: Always
+repoAddr: 192.168.0.11
+sshPrivateKey: key
+ingressNginxPath: /path/to/ingress.yaml
diff --git a/charts/postgresql/.helmignore b/charts/postgresql/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/charts/postgresql/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/postgresql/Chart.lock b/charts/postgresql/Chart.lock
new file mode 100644
index 0000000..ec67d36
--- /dev/null
+++ b/charts/postgresql/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ version: 2.4.0
+digest: sha256:8c1a5dc923412d11d4d841420494b499cb707305c8b9f87f45ea1a8bf3172cb3
+generated: "2023-05-21T19:47:56.903329844Z"
diff --git a/charts/postgresql/Chart.yaml b/charts/postgresql/Chart.yaml
new file mode 100644
index 0000000..6b84dca
--- /dev/null
+++ b/charts/postgresql/Chart.yaml
@@ -0,0 +1,30 @@
+annotations:
+ category: Database
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 15.3.0
+dependencies:
+- name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ tags:
+ - bitnami-common
+ version: 2.x.x
+description: PostgreSQL (Postgres) is an open source object-relational database known
+ for reliability and data integrity. ACID-compliant, it supports foreign keys, joins,
+ views, triggers and stored procedures.
+home: https://bitnami.com
+icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png
+keywords:
+- postgresql
+- postgres
+- database
+- sql
+- replication
+- cluster
+maintainers:
+- name: VMware, Inc.
+ url: https://github.com/bitnami/charts
+name: postgresql
+sources:
+- https://github.com/bitnami/charts/tree/main/bitnami/postgresql
+version: 12.5.6
diff --git a/charts/postgresql/README.md b/charts/postgresql/README.md
new file mode 100644
index 0000000..fc74780
--- /dev/null
+++ b/charts/postgresql/README.md
@@ -0,0 +1,683 @@
+<!--- app-name: PostgreSQL -->
+
+# PostgreSQL packaged by Bitnami
+
+PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures.
+
+[Overview of PostgreSQL](http://www.postgresql.org)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+For HA, please see [this repo](https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha)
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release.
+
+To delete the PVC's associated with `my-release`:
+
+```console
+kubectl delete pvc -l release=my-release
+```
+
+> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it.
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` |
+| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` |
+| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` |
+| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` |
+| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). | `""` |
+| `global.postgresql.auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
+| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` |
+
+### Common parameters
+
+| Name | Description | Value |
+| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
+| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
+| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` |
+| `commonLabels` | Add labels to all the deployed resources | `{}` |
+| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` |
+
+### PostgreSQL common parameters
+
+| Name | Description | Value |
+| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
+| `image.registry` | PostgreSQL image registry | `docker.io` |
+| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
+| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.3.0-debian-11-r7` |
+| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify image pull secrets | `[]` |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` |
+| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided | `""` |
+| `auth.username` | Name for a custom user to create | `""` |
+| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` is provided | `""` |
+| `auth.database` | Name for a custom database to create | `""` |
+| `auth.replicationUsername` | Name of the replication user | `repl_user` |
+| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` is provided | `""` |
+| `auth.existingSecret`                    | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case.     | `""`                       |
+| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` |
+| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` |
+| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `replication-password` |
+| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `false` |
+| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `containerPorts.postgresql` | PostgreSQL container port | `5432` |
+| `audit.logHostname` | Log client hostnames | `false` |
+| `audit.logConnections` | Add client log-in operations to the log file | `false` |
+| `audit.logDisconnections`                | Add client log-out operations to the log file                                                                                                                                                                                                                                                                                                  | `false`                    |
+| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` |
+| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` |
+| `audit.clientMinMessages` | Message log level to share with the user | `error` |
+| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` |
+| `audit.logTimezone` | Timezone for the log timestamps | `""` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.server` | IP address or name of the LDAP server. | `""` |
+| `ldap.port` | Port number on the LDAP server to connect to | `""` |
+| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` |
+| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` |
+| `ldap.basedn` | Root DN to begin the search for the user in | `""` |
+| `ldap.binddn` | DN of user to bind to LDAP | `""` |
+| `ldap.bindpw` | Password for the user to bind to LDAP | `""` |
+| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` |
+| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` |
+| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` |
+| `ldap.tls.enabled`                       | Set to true to enable TLS encryption                                                                                                                                                                                                                                                                                                           | `false`                    |
+| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. | `""` |
+| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` |
+| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` |
+| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` |
+| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` |
+| `tls.enabled` | Enable TLS traffic support | `false` |
+| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` |
+| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` |
+| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` |
+| `tls.certFilename` | Certificate filename | `""` |
+| `tls.certKeyFilename` | Certificate key filename | `""` |
+| `tls.certCAFilename` | CA Certificate filename | `""` |
+| `tls.crlFilename` | File containing a Certificate Revocation List | `""` |
+
+### PostgreSQL Primary parameters
+
+| Name | Description | Value |
+| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
+| `primary.name` | Name of the primary database (eg primary, master, leader, ...) | `primary` |
+| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` |
+| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` |
+| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` |
+| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` |
+| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` |
+| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` |
+| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` |
+| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` |
+| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` |
+| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` |
+| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` |
+| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` |
+| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` |
+| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` |
+| `primary.standby.primaryPort` | The Port of replication primary in the other cluster | `""` |
+| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` |
+| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` |
+| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` |
+| `primary.command` | Override default container command (useful when using custom images) | `[]` |
+| `primary.args` | Override default container args (useful when using custom images) | `[]` |
+| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` |
+| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
+| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` |
+| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` |
+| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` |
+| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` |
+| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` |
+| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` |
+| `primary.podSecurityContext.enabled` | Enable security context | `true` |
+| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `primary.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` |
+| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` |
+| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` |
+| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` |
+| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` |
+| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` |
+| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` |
+| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.nodeAffinityPreset.key`             | PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set.                                          | `""`                  |
+| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` |
+| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` |
+| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` |
+| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` |
+| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` |
+| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` |
+| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` |
+| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` |
+| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` |
+| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` |
+| `primary.service.type` | Kubernetes Service type | `ClusterIP` |
+| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` |
+| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
+| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` |
+| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
+| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
+| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` |
+| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `primary.service.headless.annotations` | Additional custom annotations for headless PostgreSQL primary service | `{}` |
+| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` |
+| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` |
+| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
+| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
+| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` |
+| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
+| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `primary.persistence.annotations` | Annotations for the PVC | `{}` |
+| `primary.persistence.labels` | Labels for the PVC | `{}` |
+| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
+| `primary.persistence.dataSource` | Custom PVC data source | `{}` |
+
+### PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
+
+| Name | Description | Value |
+| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
+| `readReplicas.name` | Name of the read replicas database (eg secondary, slave, ...) | `read` |
+| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` |
+| `readReplicas.extendedConfiguration` | Extended PostgreSQL read only replicas configuration (appended to main or default configuration) | `""` |
+| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` |
+| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` |
+| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` |
+| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` |
+| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` |
+| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` |
+| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
+| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` |
+| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` |
+| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `readReplicas.lifecycleHooks` | for the PostgreSQL read only container to automate configuration before or after startup | `{}` |
+| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` |
+| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` |
+| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` |
+| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` |
+| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` |
+| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
+| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` |
+| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` |
+| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` |
+| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.nodeAffinityPreset.key`             | PostgreSQL read only node label key to match. Ignored if `primary.affinity` is set.                                        | `""`                  |
+| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` |
+| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` |
+| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` |
+| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` |
+| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` |
+| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` |
+| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` |
+| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` |
+| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` |
+| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` |
+| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
+| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` |
+| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
+| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
+| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` |
+| `readReplicas.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `readReplicas.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `readReplicas.service.headless.annotations` | Additional custom annotations for headless PostgreSQL read only service | `{}` |
+| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` |
+| `readReplicas.persistence.existingClaim` | Name of an existing PVC to use | `""` |
+| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
+| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
+| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` |
+| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
+| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` |
+| `readReplicas.persistence.labels` | Labels for the PVC | `{}` |
+| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
+| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` |
+
+### NetworkPolicy parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `networkPolicy.enabled` | Enable network policies | `false` |
+| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` |
+| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` |
+| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `[]` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `[]` |
+| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` |
+| `networkPolicy.egressRules.customRules` | Custom network policy rule | `[]` |
+
+### Volume Permissions parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r120` |
+| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
+| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
+| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
+
+### Other Parameters
+
+| Name | Description | Value |
+| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` |
+| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` |
+| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
+| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` |
+| `rbac.rules` | Custom RBAC rules to set | `[]` |
+| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` |
+
+### Metrics Parameters
+
+| Name | Description | Value |
+| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------------- |
+| `metrics.enabled` | Start a prometheus exporter | `false` |
+| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` |
+| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` |
+| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.12.0-debian-11-r91` |
+| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` |
+| `metrics.customMetrics` | Define additional custom metrics | `{}` |
+| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` |
+| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` |
+| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` |
+| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` |
+| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` |
+| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` |
+| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` |
+| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
+| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` |
+| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` |
+| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
+| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
+| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release \
+ --set auth.postgresPassword=secretpassword \
+ oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+> **Warning** Setting a password will be ignored on new installation in case when previous PostgreSQL release was deleted through the helm command. In that case, old PVC will have an old password, and setting it through helm won't take effect. Deleting persistent volumes (PVs) will solve the issue. Refer to [issue 2061](https://github.com/bitnami/charts/issues/2061) for more details
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Customizing primary and read replica services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for both the primary and read types individually. This allows you to override the values in the top level service object so that the primary and read can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the primary and read to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the primary.service or readReplicas.service objects will take precedence over the top level service object.
+
+### Use a different PostgreSQL version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/).
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This helm chart also supports to customize the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration`/`readReplicas.extendedConfiguration` parameters as a string. Alternatively, to replace the entire default configuration use `primary.configuration`.
+
+You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string.
+
+In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+
+For example:
+
+- First, create the secret with the certificates files:
+
+ ```console
+ kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
+ ```
+
+- Then, use the following parameters:
+
+ ```console
+ volumePermissions.enabled=true
+ tls.enabled=true
+ tls.certificatesSecret="certificates-tls-secret"
+ tls.certFilename="cert.crt"
+ tls.certKeyFilename="cert.key"
+ ```
+
+ > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL primary
+primary:
+ sidecars:
+ - name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+# For the PostgreSQL replicas
+readReplicas:
+ sidecars:
+ - name: your-image-name
+ image: your-image
+ imagePullPolicy: Always
+ ports:
+ - name: portname
+ containerPort: 1234
+```
+
+### Metrics
+
+The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies
+
+```text
+ +--------------+
+ | |
+ +------------+ Chart 1 +-----------+
+ | | | |
+ | --------+------+ |
+ | | |
+ | | |
+ | | |
+ | | |
+ v v v
++-------+------+ +--------+------+ +--------+------+
+| | | | | |
+| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 |
+| | | | | |
++--------------+ +---------------+ +---------------+
+```
+
+The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters:
+
+```text
+postgresql.auth.username=testuser
+subchart1.postgresql.auth.username=testuser
+subchart2.postgresql.auth.username=testuser
+postgresql.auth.password=testpass
+subchart1.postgresql.auth.password=testpass
+subchart2.postgresql.auth.password=testpass
+postgresql.auth.database=testdb
+subchart1.postgresql.auth.database=testdb
+subchart2.postgresql.auth.database=testdb
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```text
+global.postgresql.auth.username=testuser
+global.postgresql.auth.password=testpass
+global.postgresql.auth.database=testdb
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you already have data in it, you will fail to sync to standby nodes for all commits; details can be found in the [code present in the container repository](https://github.com/bitnami/containers/tree/main/bitnami/postgresql). If you need to use that data, please convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift up to 4.10, let OpenShift set the volume permissions, security context, runAsUser and fsGroup automatically, and disable the predefined settings of the Helm chart: primary.securityContext.enabled=false,primary.containerSecurityContext.enabled=false,volumePermissions.enabled=false,shmVolume.enabled=false
+- For OpenShift 4.11 and higher, let OpenShift set the runAsUser and fsGroup automatically. Configure the pod and container security context to restrictive defaults and disable the volume permissions setup: primary.
+ podSecurityContext.fsGroup=null,primary.podSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.runAsUser=null,primary.containerSecurityContext.allowPrivilegeEscalation=false,primary.containerSecurityContext.runAsNonRoot=true,primary.containerSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.capabilities.drop=['ALL'],volumePermissions.enabled=false,shmVolume.enabled=false
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 12.0.0
+
+This major version changes the default PostgreSQL image from 14.x to 15.x. Follow the [official instructions](https://www.postgresql.org/docs/15/upgrading.html) to upgrade to 15.x.
+
+### To any previous version
+
+Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/).
+
+## License
+
+Copyright © 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/charts/postgresql/charts/common/.helmignore b/charts/postgresql/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/postgresql/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/postgresql/charts/common/Chart.yaml b/charts/postgresql/charts/common/Chart.yaml
new file mode 100644
index 0000000..4fc56bb
--- /dev/null
+++ b/charts/postgresql/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+ category: Infrastructure
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.4.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+ This chart is not deployable by itself.
+home: https://bitnami.com
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: VMware, Inc.
+ url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+type: library
+version: 2.4.0
diff --git a/charts/postgresql/charts/common/README.md b/charts/postgresql/charts/common/README.md
new file mode 100644
index 0000000..72fca33
--- /dev/null
+++ b/charts/postgresql/charts/common/README.md
@@ -0,0 +1,235 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## TL;DR
+
+```yaml
+dependencies:
+ - name: common
+ version: 1.x.x
+ repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.names.fullname" . }}
+data:
+ myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+ type: string
+ description: Docker registry where the image is located
+ example: docker.io
+
+repository:
+ type: string
+ description: Repository and image name
+ example: bitnami/nginx
+
+tag:
+ type: string
+ description: image tag
+ example: 1.16.1-debian-10-r63
+
+pullPolicy:
+ type: string
+ description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+ type: array
+ items:
+ type: string
+ description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+ type: boolean
+ description: Set to true if you would like to see extra information on logs
+ example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+ type: boolean
+ description: Whether enable persistence.
+ example: true
+
+storageClass:
+ type: string
+ description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+ example: "-"
+
+accessMode:
+ type: string
+ description: Access mode for the Persistent Volume Storage.
+ example: ReadWriteOnce
+
+size:
+ type: string
+ description: Size the Persistent Volume Storage.
+ example: 8Gi
+
+path:
+ type: string
+ description: Path to be persisted.
+ example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+ type: string
+ description: Name of the existing secret.
+ example: mySecret
+keyMapping:
+ description: Mapping between the expected key name and the name of the key in the existing secret.
+ type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+# password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ labels:
+ app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+ password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+ env:
+ - name: PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+ key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+ password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+ export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+ export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright © 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/charts/postgresql/charts/common/templates/_affinities.tpl b/charts/postgresql/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..81902a6
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_affinities.tpl
@@ -0,0 +1,106 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" . -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_capabilities.tpl b/charts/postgresql/charts/common/templates/_capabilities.tpl
new file mode 100644
index 0000000..697486a
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_capabilities.tpl
@@ -0,0 +1,180 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+ {{- if .Values.global.kubeVersion }}
+ {{- .Values.global.kubeVersion -}}
+ {{- else }}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+ {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for daemonset.
+*/}}
+{{- define "common.capabilities.daemonset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Vertical Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.vpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_errors.tpl b/charts/postgresql/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+ - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+ - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+ {{- $validationErrors := join "" .validationErrors -}}
+ {{- if and $validationErrors .context.Release.IsUpgrade -}}
+ {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+ {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+ {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+ {{- $errorString = print $errorString "\n%s" -}}
+ {{- printf $errorString $validationErrors | fail -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_images.tpl b/charts/postgresql/charts/common/templates/_images.tpl
new file mode 100644
index 0000000..d60c22e
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_images.tpl
@@ -0,0 +1,80 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+ {{- if .global.imageRegistry }}
+ {{- $registryName = .global.imageRegistry -}}
+ {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+ {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+ {{- printf "%s%s%s" $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+ {{- $pullSecrets := list }}
+
+ {{- if .global }}
+ {{- range .global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets . -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+ {{- $pullSecrets := list }}
+ {{- $context := .context }}
+
+ {{- if $context.Values.global }}
+ {{- range $context.Values.global.imagePullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- range .images -}}
+ {{- range .pullSecrets -}}
+ {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+ {{- end -}}
+ {{- end -}}
+
+ {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+ {{- range $pullSecrets | uniq }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_ingress.tpl b/charts/postgresql/charts/common/templates/_ingress.tpl
new file mode 100644
index 0000000..831da9c
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_ingress.tpl
@@ -0,0 +1,68 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+ - serviceName - String. Name of an existing service backend
+ - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+ name: {{ .serviceName }}
+ port:
+ {{- if typeIs "string" .servicePort }}
+ name: {{ .servicePort }}
+ {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+ number: {{ .servicePort | int }}
+ {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_labels.tpl b/charts/postgresql/charts/common/templates/_labels.tpl
new file mode 100644
index 0000000..252066c
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_labels.tpl
@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_names.tpl b/charts/postgresql/charts/common/templates/_names.tpl
new file mode 100644
index 0000000..617a234
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_names.tpl
@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_secrets.tpl b/charts/postgresql/charts/common/templates/_secrets.tpl
new file mode 100644
index 0000000..a1708b2
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_secrets.tpl
@@ -0,0 +1,165 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+ - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+ - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+ to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+ +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+ - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+ {{- if not (typeIs "string" .existingSecret) -}}
+ {{- if .existingSecret.keyMapping -}}
+ {{- $key = index .existingSecret.keyMapping $.key -}}
+ {{- end -}}
+ {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - length - int - Optional - Length of the generated random password.
+ - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+ - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+ - context - Context - Required - Parent context.
+
+The order in which this function returns a secret password:
+ 1. Already existing 'Secret' resource
+ (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+ 2. Password provided via the values.yaml
+ (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+ 3. Randomly generated secret password
+ (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+ {{- if hasKey $secretData .key }}
+ {{- $password = index $secretData .key | quote }}
+ {{- else }}
+ {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+ {{- end -}}
+{{- else if $providedPasswordValue }}
+ {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+ {{- if .context.Values.enabled }}
+ {{- $subchart = $chartName }}
+ {{- end -}}
+
+ {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+ {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+ {{- $passwordValidationErrors := list $requiredPasswordError -}}
+ {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+ {{- if .strong }}
+ {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+ {{- $password = randAscii $passwordLength }}
+ {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+ {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+ {{- else }}
+ {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+ {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - key - String - Required - Name of the key in the secret.
+ - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+ - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+ {{- $value = index $secretData .key -}}
+{{- else -}}
+ {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+ - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+ - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_storage.tpl b/charts/postgresql/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..60e2a84
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_storage.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+ {{- if .global.storageClass -}}
+ {{- $storageClass = .global.storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+ {{- if (eq "-" $storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" $storageClass -}}
+ {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_tplvalues.tpl b/charts/postgresql/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..2db1668
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_utils.tpl b/charts/postgresql/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..b1ead50
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_utils.tpl
@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+ {{- $fieldNameSplit := splitList "-" .field -}}
+ {{- $upperCaseFieldNameSplit := list -}}
+
+ {{- range $fieldNameSplit -}}
+ {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+ {{- end -}}
+
+ {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+ {{- if not $latestObj -}}
+ {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+ {{- end -}}
+ {{- $value = ( index $latestObj . ) -}}
+ {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+ {{- if $value -}}
+ {{- $key = . }}
+ {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/_warnings.tpl b/charts/postgresql/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..ae10fa4
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/_warnings.tpl
@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/validations/_cassandra.tpl b/charts/postgresql/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..ded1ae3
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+ {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+ {{- $enabled := include "common.cassandra.values.enabled" . -}}
+ {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+ {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.dbUser.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.cassandra.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+ {{- if .subchart -}}
+ cassandra.dbUser
+ {{- else -}}
+ dbUser
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/validations/_mariadb.tpl b/charts/postgresql/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 0000000..b6906ff
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+ {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mariadb.values.enabled" . -}}
+ {{- $architecture := include "common.mariadb.values.architecture" . -}}
+ {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mariadb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mariadb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mariadb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/validations/_mongodb.tpl b/charts/postgresql/charts/common/templates/validations/_mongodb.tpl
new file mode 100644
index 0000000..f820ec1
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/validations/_mongodb.tpl
@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret"
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+ {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mongodb.values.enabled" . -}}
+ {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+ {{- $architecture := include "common.mongodb.values.architecture" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+ {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+ {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+ {{- if and $valueUsername $valueDatabase -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replicaset") -}}
+ {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mongodb.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+ {{- if .subchart -}}
+ mongodb.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mongodb.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/validations/_mysql.tpl b/charts/postgresql/charts/common/templates/validations/_mysql.tpl
new file mode 100644
index 0000000..74472a0
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/validations/_mysql.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+ {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+ {{- $enabled := include "common.mysql.values.enabled" . -}}
+ {{- $architecture := include "common.mysql.values.architecture" . -}}
+ {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+ {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+ {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+ {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+ {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+ {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+ {{- if not (empty $valueUsername) -}}
+ {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+ {{- end -}}
+
+ {{- if (eq $architecture "replication") -}}
+ {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.auth.existingSecret | quote -}}
+ {{- else -}}
+ {{- .context.Values.auth.existingSecret | quote -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.mysql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+ {{- if .subchart -}}
+ {{- .context.Values.mysql.architecture -}}
+ {{- else -}}
+ {{- .context.Values.architecture -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+ {{- if .subchart -}}
+ mysql.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/validations/_postgresql.tpl b/charts/postgresql/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+ {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+ {{- $enabled := include "common.postgresql.values.enabled" . -}}
+ {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+ {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+ {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+ {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+ {{- if (eq $enabledReplication "true") -}}
+ {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+ - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+ {{- if .context.Values.global -}}
+ {{- if .context.Values.global.postgresql -}}
+ {{- index .context.Values.global.postgresql .key | quote -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+ {{- if .subchart -}}
+ {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+ {{- else -}}
+ {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+ {{- if not $globalValue -}}
+ {{- if .subchart -}}
+ postgresql.postgresqlPassword
+ {{- else -}}
+ postgresqlPassword
+ {{- end -}}
+ {{- else -}}
+ global.postgresql.postgresqlPassword
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+ {{- else -}}
+ {{- printf "%v" .context.Values.replication.enabled -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+ {{- if .subchart -}}
+ postgresql.replication.password
+ {{- else -}}
+ replication.password
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/validations/_redis.tpl b/charts/postgresql/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..dcccfc1
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+ {{- $enabled := include "common.redis.values.enabled" . -}}
+ {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+ {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+ {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+ {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+ {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+ {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+ {{- if eq $useAuth "true" -}}
+ {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.redis.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+ {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardized value layout (chart version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+ {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+ {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+ {{- if $standarizedAuthValues -}}
+ {{- true -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/templates/validations/_validations.tpl b/charts/postgresql/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/charts/postgresql/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+ {{- range .required -}}
+ {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+ - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+ - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+ - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+ {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+ {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+ {{- if not $value -}}
+ {{- $varname := "my-value" -}}
+ {{- $getCurrentValue := "" -}}
+ {{- if and .secret .field -}}
+ {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+ {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+ {{- end -}}
+ {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/charts/common/values.yaml b/charts/postgresql/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/charts/postgresql/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/charts/postgresql/templates/NOTES.txt b/charts/postgresql/templates/NOTES.txt
new file mode 100644
index 0000000..21b3d29
--- /dev/null
+++ b/charts/postgresql/templates/NOTES.txt
@@ -0,0 +1,91 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+ kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+ kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash
+
+In order to replicate the container startup scripts execute this command:
+
+ /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh
+
+{{- else }}
+
+PostgreSQL can be accessed via port {{ include "postgresql.service.port" . }} on the following DNS names from within your cluster:
+
+ {{ include "postgresql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
+
+{{- if eq .Values.architecture "replication" }}
+
+ {{ include "postgresql.readReplica.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
+
+{{- end }}
+
+{{- $customUser := include "postgresql.username" . }}
+{{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }}
+
+To get the password for "postgres" run:
+
+ export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{include "postgresql.adminPasswordKey" .}}}" | base64 -d)
+
+To get the password for "{{ $customUser }}" run:
+
+ export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{include "postgresql.userPasswordKey" .}}}" | base64 -d)
+
+{{- else }}
+
+To get the password for "{{ default "postgres" $customUser }}" run:
+
+ export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{ ternary "password" (include "postgresql.adminPasswordKey" .) (and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 -d)
+
+{{- end }}
+
+To connect to your database run the following command:
+
+ kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \
+ --command -- psql --host {{ include "postgresql.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }}
+
+ > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }} does not exist"
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.primary.service.type }}
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.primary.fullname" . }})
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }}
+
+{{- else if contains "LoadBalancer" .Values.primary.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.primary.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }}
+
+{{- else if contains "ClusterIP" .Values.primary.service.type }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.primary.fullname" . }} {{ include "postgresql.service.port" . }}:{{ include "postgresql.service.port" . }} &
+ PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }}
+
+{{- end }}
+{{- end }}
+
+WARNING: The configured password will be ignored on a new installation if the previous PostgreSQL release was deleted through the helm command. In that case, the old PVC will retain the old password, and setting it through helm won't take effect. Deleting the persistent volumes (PVs) will solve the issue.
+
+{{- include "postgresql.validateValues" . -}}
+{{- include "common.warnings.rollingTag" .Values.image -}}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
diff --git a/charts/postgresql/templates/_helpers.tpl b/charts/postgresql/templates/_helpers.tpl
new file mode 100644
index 0000000..8189380
--- /dev/null
+++ b/charts/postgresql/templates/_helpers.tpl
@@ -0,0 +1,399 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL Primary objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.primary.fullname" -}}
+{{- if eq .Values.architecture "replication" }}
+ {{- printf "%s-%s" (include "common.names.fullname" .) .Values.primary.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+ {{- include "common.names.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL read-only replicas objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.readReplica.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) .Values.readReplicas.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL primary headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.primary.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.primary.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL read-only replicas headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.readReplica.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.readReplica.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL image name
+*/}}
+{{- define "postgresql.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL metrics image name
+*/}}
+{{- define "postgresql.metrics.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "postgresql.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "postgresql.imagePullSecrets" -}}
+{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the name for a custom user to create
+*/}}
+{{- define "postgresql.username" -}}
+{{- if .Values.global.postgresql.auth.username }}
+ {{- .Values.global.postgresql.auth.username -}}
+{{- else -}}
+ {{- .Values.auth.username -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name for a custom database to create
+*/}}
+{{- define "postgresql.database" -}}
+{{- if .Values.global.postgresql.auth.database }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.database $) -}}
+{{- else if .Values.auth.database -}}
+ {{- printf "%s" (tpl .Values.auth.database $) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "postgresql.secretName" -}}
+{{- if .Values.global.postgresql.auth.existingSecret }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}}
+{{- else if .Values.auth.existingSecret -}}
+ {{- printf "%s" (tpl .Values.auth.existingSecret $) -}}
+{{- else -}}
+ {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the replication-password key.
+*/}}
+{{- define "postgresql.replicationPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }}
+ {{- if .Values.global.postgresql.auth.secretKeys.replicationPasswordKey }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.replicationPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.replicationPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.replicationPasswordKey $) -}}
+ {{- else -}}
+ {{- "replication-password" -}}
+ {{- end -}}
+{{- else -}}
+ {{- "replication-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the admin-password key.
+*/}}
+{{- define "postgresql.adminPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }}
+ {{- if .Values.global.postgresql.auth.secretKeys.adminPasswordKey }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.adminPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.adminPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.adminPasswordKey $) -}}
+ {{- end -}}
+{{- else -}}
+ {{- "postgres-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the user-password key.
+*/}}
+{{- define "postgresql.userPasswordKey" -}}
+{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }}
+ {{- if or (empty (include "postgresql.username" .)) (eq (include "postgresql.username" .) "postgres") }}
+ {{- printf "%s" (include "postgresql.adminPasswordKey" .) -}}
+ {{- else -}}
+ {{- if .Values.global.postgresql.auth.secretKeys.userPasswordKey }}
+ {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.userPasswordKey $) -}}
+ {{- else if .Values.auth.secretKeys.userPasswordKey -}}
+ {{- printf "%s" (tpl .Values.auth.secretKeys.userPasswordKey $) -}}
+ {{- end -}}
+ {{- end -}}
+{{- else -}}
+ {{- ternary "password" "postgres-password" (and (not (empty (include "postgresql.username" .))) (ne (include "postgresql.username" .) "postgres")) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret object should be created
+*/}}
+{{- define "postgresql.createSecret" -}}
+{{- if not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret) -}}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL service port
+*/}}
+{{- define "postgresql.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql }}
+ {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+ {{- .Values.primary.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL read replica service port
+*/}}
+{{- define "postgresql.readReplica.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql }}
+ {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+ {{- .Values.readReplicas.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary configuration ConfigMap name.
+*/}}
+{{- define "postgresql.primary.configmapName" -}}
+{{- if .Values.primary.existingConfigmap -}}
+ {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}}
+{{- else -}}
+ {{- printf "%s-configuration" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the configuration
+*/}}
+{{- define "postgresql.primary.createConfigmap" -}}
+{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) }}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.primary.extendedConfigmapName" -}}
+{{- if .Values.primary.existingExtendedConfigmap -}}
+ {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}}
+{{- else -}}
+ {{- printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL read replica extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.readReplicas.extendedConfigmapName" -}}
+ {{- printf "%s-extended-configuration" (include "postgresql.readReplica.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the extended configuration
+*/}}
+{{- define "postgresql.primary.createExtendedConfigmap" -}}
+{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) }}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL read replica with the extended configuration
+*/}}
+{{- define "postgresql.readReplicas.createExtendedConfigmap" -}}
+{{- if .Values.readReplicas.extendedConfiguration }}
+ {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "postgresql.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap should be mounted with PostgreSQL configuration
+*/}}
+{{- define "postgresql.mountConfigurationCM" -}}
+{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts ConfigMap name.
+*/}}
+{{- define "postgresql.initdb.scriptsCM" -}}
+{{- if .Values.primary.initdb.scriptsConfigMap -}}
+ {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}}
+{{- else -}}
+ {{- printf "%s-init-scripts" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if TLS is enabled for LDAP connection
+*/}}
+{{- define "postgresql.ldap.tls.enabled" -}}
+{{- if and (kindIs "string" .Values.ldap.tls) (not (empty .Values.ldap.tls)) }}
+ {{- true -}}
+{{- else if and (kindIs "map" .Values.ldap.tls) .Values.ldap.tls.enabled }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the readiness probe command
+*/}}
+{{- define "postgresql.readinessProbeCommand" -}}
+{{- $customUser := include "postgresql.username" . }}
+- |
+{{- if (include "postgresql.database" .) }}
+ exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- else }}
+ exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- end }}
+{{- if contains "bitnami/" .Values.image.repository }}
+ [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "postgresql.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}}
+{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap
+*/}}
+{{- define "postgresql.validateValues.ldapConfigurationMethod" -}}
+{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }}
+postgresql: ldap.url, ldap.server
+ You cannot set both `ldap.url` and `ldap.server` at the same time.
+ Please provide a unique way to configure LDAP.
+ More info at https://www.postgresql.org/docs/current/auth-ldap.html
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If PSP is enabled RBAC should be enabled too
+*/}}
+{{- define "postgresql.validateValues.psp" -}}
+{{- if and .Values.psp.create (not .Values.rbac.create) }}
+postgresql: psp.create, rbac.create
+ RBAC should be enabled if PSP is enabled in order for PSP to work.
+ More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "postgresql.tlsCert" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}}
+{{- else -}}
+  {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "postgresql.tlsCertKey" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}}
+{{- else -}}
+  {{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.tlsCACert" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}}
+{{- else -}}
+ {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CRL file.
+*/}}
+{{- define "postgresql.tlsCRL" -}}
+{{- if .Values.tls.crlFilename -}}
+{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "postgresql.createTlsSecret" -}}
+{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the TLS certificates.
+*/}}
+{{- define "postgresql.tlsSecretName" -}}
+{{- if .Values.tls.autoGenerated }}
+ {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- else -}}
+ {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }}
+{{- end -}}
+{{- end -}}
diff --git a/charts/postgresql/templates/extra-list.yaml b/charts/postgresql/templates/extra-list.yaml
new file mode 100644
index 0000000..9ac65f9
--- /dev/null
+++ b/charts/postgresql/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/charts/postgresql/templates/networkpolicy-egress.yaml b/charts/postgresql/templates/networkpolicy-egress.yaml
new file mode 100644
index 0000000..e862147
--- /dev/null
+++ b/charts/postgresql/templates/networkpolicy-egress.yaml
@@ -0,0 +1,32 @@
+{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.egressRules.denyConnectionsToExternal .Values.networkPolicy.egressRules.customRules) }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-egress" (include "common.names.fullname" .) }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Egress
+ egress:
+ {{- if .Values.networkPolicy.egressRules.denyConnectionsToExternal }}
+ - ports:
+ - port: 53
+ protocol: UDP
+ - port: 53
+ protocol: TCP
+ - to:
+ - namespaceSelector: {}
+ {{- end }}
+ {{- if .Values.networkPolicy.egressRules.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/postgresql/templates/primary/configmap.yaml b/charts/postgresql/templates/primary/configmap.yaml
new file mode 100644
index 0000000..d654a22
--- /dev/null
+++ b/charts/postgresql/templates/primary/configmap.yaml
@@ -0,0 +1,24 @@
+{{- if (include "postgresql.primary.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-configuration" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ {{- if .Values.primary.configuration }}
+ postgresql.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.pgHbaConfiguration }}
+ pg_hba.conf: |
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/postgresql/templates/primary/extended-configmap.yaml b/charts/postgresql/templates/primary/extended-configmap.yaml
new file mode 100644
index 0000000..d129bd3
--- /dev/null
+++ b/charts/postgresql/templates/primary/extended-configmap.yaml
@@ -0,0 +1,18 @@
+{{- if (include "postgresql.primary.createExtendedConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ override.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/charts/postgresql/templates/primary/initialization-configmap.yaml b/charts/postgresql/templates/primary/initialization-configmap.yaml
new file mode 100644
index 0000000..d3d26cb
--- /dev/null
+++ b/charts/postgresql/templates/primary/initialization-configmap.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-init-scripts" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) | nindent 2 }}
+{{- end }}
diff --git a/charts/postgresql/templates/primary/metrics-configmap.yaml b/charts/postgresql/templates/primary/metrics-configmap.yaml
new file mode 100644
index 0000000..8ad2f35
--- /dev/null
+++ b/charts/postgresql/templates/primary/metrics-configmap.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
+{{- end }}
diff --git a/charts/postgresql/templates/primary/metrics-svc.yaml b/charts/postgresql/templates/primary/metrics-svc.yaml
new file mode 100644
index 0000000..a38b52a
--- /dev/null
+++ b/charts/postgresql/templates/primary/metrics-svc.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.metrics.service.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.metrics.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+ {{- if .Values.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.metrics.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ port: {{ .Values.metrics.service.ports.metrics }}
+ targetPort: http-metrics
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+{{- end }}
diff --git a/charts/postgresql/templates/primary/networkpolicy.yaml b/charts/postgresql/templates/primary/networkpolicy.yaml
new file mode 100644
index 0000000..ce0052d
--- /dev/null
+++ b/charts/postgresql/templates/primary/networkpolicy.yaml
@@ -0,0 +1,57 @@
+{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.metrics.enabled .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled) }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-ingress" (include "postgresql.primary.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: primary
+ ingress:
+ {{- if and .Values.metrics.enabled .Values.networkPolicy.metrics.enabled (or .Values.networkPolicy.metrics.namespaceSelector .Values.networkPolicy.metrics.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.metrics.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.metrics.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.metrics.containerPorts.metrics }}
+ {{- end }}
+ {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (eq .Values.architecture "replication") }}
+ - from:
+ - podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+ app.kubernetes.io/component: read
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/postgresql/templates/primary/servicemonitor.yaml b/charts/postgresql/templates/primary/servicemonitor.yaml
new file mode 100644
index 0000000..c4a19fe
--- /dev/null
+++ b/charts/postgresql/templates/primary/servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "postgresql.primary.fullname" . }}
+  namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.metrics.serviceMonitor.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.metrics.serviceMonitor.jobLabel }}
+  jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      {{- if .Values.metrics.serviceMonitor.selector }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+      {{- end }}
+      app.kubernetes.io/component: metrics
+  endpoints:
+    - port: http-metrics
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.relabelings }}
+      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.honorLabels }}
+      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/postgresql/templates/primary/statefulset.yaml b/charts/postgresql/templates/primary/statefulset.yaml
new file mode 100644
index 0000000..d56d052
--- /dev/null
+++ b/charts/postgresql/templates/primary/statefulset.yaml
@@ -0,0 +1,640 @@
+{{- $customUser := include "postgresql.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ include "postgresql.primary.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.primary.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ replicas: 1
+ serviceName: {{ include "postgresql.primary.svc.headless" . }}
+ {{- if .Values.primary.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: primary
+ template:
+ metadata:
+ name: {{ include "postgresql.primary.fullname" . }}
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/component: primary
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if or (include "postgresql.primary.createConfigmap" .) (include "postgresql.primary.createExtendedConfigmap" .) .Values.primary.podAnnotations }}
+ annotations:
+ {{- if (include "postgresql.primary.createConfigmap" .) }}
+ checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if (include "postgresql.primary.createExtendedConfigmap" .) }}
+ checksum/extended-configuration: {{ include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.primary.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ spec:
+ {{- if .Values.primary.extraPodSpec }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "postgresql.serviceAccountName" . }}
+ {{- include "postgresql.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.primary.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.primary.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.primary.priorityClassName }}
+ priorityClassName: {{ .Values.primary.priorityClassName }}
+ {{- end }}
+ {{- if .Values.primary.schedulerName }}
+ schedulerName: {{ .Values.primary.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.primary.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if .Values.primary.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ hostNetwork: {{ .Values.primary.hostNetwork }}
+ hostIPC: {{ .Values.primary.hostIPC }}
+ {{- if or (and .Values.tls.enabled (not .Values.volumePermissions.enabled)) (and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled)) .Values.primary.initContainers }}
+ initContainers:
+ {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+ - name: copy-certs
+ image: {{ include "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.primary.resources }}
+ resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+ {{- end }}
+ # We don't require a privileged container in this case
+ {{- if .Values.primary.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ chmod 600 {{ include "postgresql.tlsCertKey" . }}
+ volumeMounts:
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }}
+ - name: init-chmod-data
+ image: {{ include "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ {{- if .Values.primary.persistence.enabled }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }}
+ {{- else }}
+ chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }}
+ {{- end }}
+ mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+ chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+ find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+ {{- else }}
+ xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ chmod -R 777 /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+ {{- else }}
+ chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+ {{- end }}
+ chmod 600 {{ include "postgresql.tlsCertKey" . }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.primary.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.primary.persistence.mountPath }}
+ {{- if .Values.primary.persistence.subPath }}
+ subPath: {{ .Values.primary.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.initContainers }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.initContainers "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: postgresql
+ image: {{ include "postgresql.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.primary.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.primary.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.primary.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: POSTGRESQL_PORT_NUMBER
+ value: {{ .Values.containerPorts.postgresql | quote }}
+ - name: POSTGRESQL_VOLUME_DIR
+ value: {{ .Values.primary.persistence.mountPath | quote }}
+ {{- if .Values.primary.persistence.mountPath }}
+ - name: PGDATA
+ value: {{ .Values.postgresqlDataDir | quote }}
+ {{- end }}
+ # Authentication
+ {{- if and (not (empty $customUser)) (ne $customUser "postgres") }}
+ - name: POSTGRES_USER
+ value: {{ $customUser | quote }}
+ {{- if .Values.auth.enablePostgresUser }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.adminPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.adminPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.userPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.userPasswordKey" . }}
+ {{- end }}
+ {{- if (include "postgresql.database" .) }}
+ - name: POSTGRES_DB
+ value: {{ (include "postgresql.database" .) | quote }}
+ {{- end }}
+ # Replication
+ {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }}
+ - name: POSTGRES_REPLICATION_MODE
+ value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }}
+ - name: POSTGRES_REPLICATION_USER
+ value: {{ .Values.auth.replicationUsername | quote }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_REPLICATION_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.replicationPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.replicationPasswordKey" . }}
+ {{- end }}
+ {{- if not (eq .Values.replication.synchronousCommit "off") }}
+ - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE
+ value: {{ .Values.replication.synchronousCommit | quote }}
+ - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS
+ value: {{ .Values.replication.numSynchronousReplicas | quote }}
+ {{- end }}
+ - name: POSTGRES_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ {{- end }}
+ # Initdb
+ {{- if .Values.primary.initdb.args }}
+ - name: POSTGRES_INITDB_ARGS
+ value: {{ .Values.primary.initdb.args | quote }}
+ {{- end }}
+ {{- if .Values.primary.initdb.postgresqlWalDir }}
+ - name: POSTGRES_INITDB_WALDIR
+ value: {{ .Values.primary.initdb.postgresqlWalDir | quote }}
+ {{- end }}
+ {{- if .Values.primary.initdb.user }}
+ - name: POSTGRESQL_INITSCRIPTS_USERNAME
+ value: {{ .Values.primary.initdb.user }}
+ {{- end }}
+ {{- if .Values.primary.initdb.password }}
+ - name: POSTGRESQL_INITSCRIPTS_PASSWORD
+ value: {{ .Values.primary.initdb.password | quote }}
+ {{- end }}
+ # Standby
+ {{- if .Values.primary.standby.enabled }}
+ - name: POSTGRES_MASTER_HOST
+ value: {{ .Values.primary.standby.primaryHost }}
+ - name: POSTGRES_MASTER_PORT_NUMBER
+ value: {{ .Values.primary.standby.primaryPort | quote }}
+ {{- end }}
+ # LDAP
+ - name: POSTGRESQL_ENABLE_LDAP
+ value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }}
+ {{- if .Values.ldap.enabled }}
+ {{- if or .Values.ldap.url .Values.ldap.uri }}
+ - name: POSTGRESQL_LDAP_URL
+ value: {{ coalesce .Values.ldap.url .Values.ldap.uri }}
+ {{- else }}
+ - name: POSTGRESQL_LDAP_SERVER
+ value: {{ .Values.ldap.server }}
+ - name: POSTGRESQL_LDAP_PORT
+ value: {{ .Values.ldap.port | quote }}
+ - name: POSTGRESQL_LDAP_SCHEME
+ value: {{ .Values.ldap.scheme }}
+ {{- if (include "postgresql.ldap.tls.enabled" .) }}
+ - name: POSTGRESQL_LDAP_TLS
+ value: "1"
+ {{- end }}
+ - name: POSTGRESQL_LDAP_PREFIX
+ value: {{ .Values.ldap.prefix | quote }}
+ - name: POSTGRESQL_LDAP_SUFFIX
+ value: {{ .Values.ldap.suffix | quote }}
+ - name: POSTGRESQL_LDAP_BASE_DN
+ value: {{ coalesce .Values.ldap.baseDN .Values.ldap.basedn }}
+ - name: POSTGRESQL_LDAP_BIND_DN
+ value: {{ coalesce .Values.ldap.bindDN .Values.ldap.binddn}}
+ {{- if or (not (empty .Values.ldap.bind_password)) (not (empty .Values.ldap.bindpw)) }}
+ - name: POSTGRESQL_LDAP_BIND_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: ldap-password
+ {{- end }}
+ - name: POSTGRESQL_LDAP_SEARCH_ATTR
+ value: {{ coalesce .Values.ldap.search_attr .Values.ldap.searchAttribute }}
+ - name: POSTGRESQL_LDAP_SEARCH_FILTER
+ value: {{ coalesce .Values.ldap.search_filter .Values.ldap.searchFilter }}
+ {{- end }}
+ {{- end }}
+ # TLS
+ - name: POSTGRESQL_ENABLE_TLS
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+ value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+ - name: POSTGRESQL_TLS_CERT_FILE
+ value: {{ include "postgresql.tlsCert" . }}
+ - name: POSTGRESQL_TLS_KEY_FILE
+ value: {{ include "postgresql.tlsCertKey" . }}
+ {{- if .Values.tls.certCAFilename }}
+ - name: POSTGRESQL_TLS_CA_FILE
+ value: {{ include "postgresql.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.tls.crlFilename }}
+ - name: POSTGRESQL_TLS_CRL_FILE
+ value: {{ include "postgresql.tlsCRL" . }}
+ {{- end }}
+ {{- end }}
+ # Audit
+ - name: POSTGRESQL_LOG_HOSTNAME
+ value: {{ .Values.audit.logHostname | quote }}
+ - name: POSTGRESQL_LOG_CONNECTIONS
+ value: {{ .Values.audit.logConnections | quote }}
+ - name: POSTGRESQL_LOG_DISCONNECTIONS
+ value: {{ .Values.audit.logDisconnections | quote }}
+ {{- if .Values.audit.logLinePrefix }}
+ - name: POSTGRESQL_LOG_LINE_PREFIX
+ value: {{ .Values.audit.logLinePrefix | quote }}
+ {{- end }}
+ {{- if .Values.audit.logTimezone }}
+ - name: POSTGRESQL_LOG_TIMEZONE
+ value: {{ .Values.audit.logTimezone | quote }}
+ {{- end }}
+ {{- if .Values.audit.pgAuditLog }}
+ - name: POSTGRESQL_PGAUDIT_LOG
+ value: {{ .Values.audit.pgAuditLog | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+ value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+ # Others
+ - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+ value: {{ .Values.audit.clientMinMessages | quote }}
+ - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+ value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+ {{- if .Values.primary.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.primary.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.primary.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.primary.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.primary.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ containerPort: {{ .Values.containerPorts.postgresql }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.primary.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.primary.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - -e
+ {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.resources }}
+ resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.primary.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d/
+ {{- end }}
+ {{- if .Values.primary.initdb.scriptsSecret }}
+ - name: custom-init-scripts-secret
+ mountPath: /docker-entrypoint-initdb.d/secret
+ {{- end }}
+ {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+ - name: postgresql-extended-config
+ mountPath: {{ .Values.primary.persistence.mountPath }}/conf/conf.d/
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.primary.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.primary.persistence.mountPath }}
+ {{- if .Values.primary.persistence.subPath }}
+ subPath: {{ .Values.primary.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ - name: postgresql-config
+ mountPath: {{ .Values.primary.persistence.mountPath }}/conf
+ {{- end }}
+ {{- if .Values.primary.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ include "postgresql.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.customMetrics }}
+ args: ["--extend.query-path", "/conf/custom-metrics.yaml"]
+ {{- end }}
+ env:
+ {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }}
+ - name: DATA_SOURCE_URI
+ value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.service.port" .)) $database }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: DATA_SOURCE_PASS_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.userPasswordKey" .) }}
+ {{- else }}
+ - name: DATA_SOURCE_PASS
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.userPasswordKey" . }}
+ {{- end }}
+ - name: DATA_SOURCE_USER
+ value: {{ default "postgres" $customUser | quote }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ containerPort: {{ .Values.metrics.containerPorts.metrics }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ mountPath: /conf
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.primary.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }}
+ {{- end }}
+ volumes:
+ {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+ - name: postgresql-config
+ configMap:
+ name: {{ include "postgresql.primary.configmapName" . }}
+ {{- end }}
+ {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ include "postgresql.primary.extendedConfigmapName" . }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ include "postgresql.secretName" . }}
+ {{- end }}
+ {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+ - name: custom-init-scripts
+ configMap:
+ name: {{ include "postgresql.initdb.scriptsCM" . }}
+ {{- end }}
+ {{- if .Values.primary.initdb.scriptsSecret }}
+ - name: custom-init-scripts-secret
+ secret:
+ secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ secret:
+ secretName: {{ include "postgresql.tlsSecretName" . }}
+ - name: postgresql-certificates
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.primary.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ configMap:
+ name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ emptyDir:
+ medium: Memory
+ {{- if .Values.shmVolume.sizeLimit }}
+ sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+ {{- end }}
+ {{- end }}
+ {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ tpl .Values.primary.persistence.existingClaim $ }}
+ {{- else if not .Values.primary.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- if .Values.primary.persistence.annotations }}
+ annotations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.primary.persistence.labels }}
+ labels: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.labels "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.primary.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if .Values.primary.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.primary.persistence.size | quote }}
+ {{- if .Values.primary.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
diff --git a/charts/postgresql/templates/primary/svc-headless.yaml b/charts/postgresql/templates/primary/svc-headless.yaml
new file mode 100644
index 0000000..684177a
--- /dev/null
+++ b/charts/postgresql/templates/primary/svc-headless.yaml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.primary.svc.headless" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: primary
+  # "annotations" must be emitted unconditionally: the tolerate-unready
+  # annotation below is always present, so gating the key on user-supplied
+  # values would orphan it directly under "metadata" (invalid object).
+  annotations:
+    {{- if .Values.primary.service.headless.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.headless.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    # Use this annotation in addition to the actual publishNotReadyAddresses
+    # field below because the annotation will stop being respected soon but the
+    # field is broken in some versions of Kubernetes:
+    # https://github.com/kubernetes/kubernetes/issues/58662
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  # We want all pods in the StatefulSet to have their addresses published for
+  # the sake of the other Postgresql pods even before they're ready, since they
+  # have to be able to talk to each other in order to become ready.
+  publishNotReadyAddresses: true
+  ports:
+    - name: tcp-postgresql
+      port: {{ template "postgresql.service.port" . }}
+      targetPort: tcp-postgresql
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: primary
diff --git a/charts/postgresql/templates/primary/svc.yaml b/charts/postgresql/templates/primary/svc.yaml
new file mode 100644
index 0000000..6ddd55b
--- /dev/null
+++ b/charts/postgresql/templates/primary/svc.yaml
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.primary.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: primary
+ {{- if or .Values.commonAnnotations .Values.primary.service.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.primary.service.annotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.primary.service.type }}
+ {{- if or (eq .Values.primary.service.type "LoadBalancer") (eq .Values.primary.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{- toYaml .Values.primary.service.loadBalancerSourceRanges | nindent 4 }}
+ {{- end }}
+ {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and .Values.primary.service.clusterIP (eq .Values.primary.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.primary.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.primary.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.primary.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.primary.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ port: {{ template "postgresql.service.port" . }}
+ targetPort: tcp-postgresql
+ {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }}
+ nodePort: {{ .Values.primary.service.nodePorts.postgresql }}
+ {{- else if eq .Values.primary.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.primary.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: primary
diff --git a/charts/postgresql/templates/prometheusrule.yaml b/charts/postgresql/templates/prometheusrule.yaml
new file mode 100644
index 0000000..24be710
--- /dev/null
+++ b/charts/postgresql/templates/prometheusrule.yaml
@@ -0,0 +1,22 @@
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.metrics.prometheusRule.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ groups:
+ - name: {{ include "common.names.fullname" . }}
+ rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }}
+{{- end }}
diff --git a/charts/postgresql/templates/psp.yaml b/charts/postgresql/templates/psp.yaml
new file mode 100644
index 0000000..48d1175
--- /dev/null
+++ b/charts/postgresql/templates/psp.yaml
@@ -0,0 +1,41 @@
+{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- if and $pspAvailable .Values.psp.create }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ privileged: false
+ volumes:
+ - 'configMap'
+ - 'secret'
+ - 'persistentVolumeClaim'
+ - 'emptyDir'
+ - 'projected'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+{{- end }}
diff --git a/charts/postgresql/templates/read/extended-configmap.yaml b/charts/postgresql/templates/read/extended-configmap.yaml
new file mode 100644
index 0000000..e329d13
--- /dev/null
+++ b/charts/postgresql/templates/read/extended-configmap.yaml
@@ -0,0 +1,18 @@
+{{- if (include "postgresql.readReplicas.createExtendedConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-extended-configuration" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ override.conf: |-
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extendedConfiguration "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/charts/postgresql/templates/read/metrics-configmap.yaml b/charts/postgresql/templates/read/metrics-configmap.yaml
new file mode 100644
index 0000000..b00a6ec
--- /dev/null
+++ b/charts/postgresql/templates/read/metrics-configmap.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.metrics.enabled .Values.metrics.customMetrics (eq .Values.architecture "replication") }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
+{{- end }}
diff --git a/charts/postgresql/templates/read/metrics-svc.yaml b/charts/postgresql/templates/read/metrics-svc.yaml
new file mode 100644
index 0000000..6f54ed2
--- /dev/null
+++ b/charts/postgresql/templates/read/metrics-svc.yaml
@@ -0,0 +1,33 @@
+{{- if and .Values.metrics.enabled (eq .Values.architecture "replication") }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics-read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.metrics.service.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.metrics.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+ {{- if .Values.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.metrics.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ port: {{ .Values.metrics.service.ports.metrics }}
+ targetPort: http-metrics
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/postgresql/templates/read/networkpolicy.yaml b/charts/postgresql/templates/read/networkpolicy.yaml
new file mode 100644
index 0000000..c969cd7
--- /dev/null
+++ b/charts/postgresql/templates/read/networkpolicy.yaml
@@ -0,0 +1,36 @@
+{{- if and .Values.networkPolicy.enabled (eq .Values.architecture "replication") .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+ name: {{ printf "%s-ingress" (include "postgresql.readReplica.fullname" .) }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ podSelector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: read
+ ingress:
+ {{- if and .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector) }}
+ - from:
+ {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector }}
+ - namespaceSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector }}
+ - podSelector:
+ matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/postgresql/templates/read/servicemonitor.yaml b/charts/postgresql/templates/read/servicemonitor.yaml
new file mode 100644
index 0000000..d511d6b
--- /dev/null
+++ b/charts/postgresql/templates/read/servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled (eq .Values.architecture "replication") }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "postgresql.readReplica.fullname" . }}
+ namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: metrics-read
+ {{- if .Values.metrics.serviceMonitor.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.metrics.serviceMonitor.jobLabel }}
+ jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ {{- if .Values.metrics.serviceMonitor.selector }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+ {{- end }}
+ app.kubernetes.io/component: metrics-read
+ endpoints:
+ - port: http-metrics
+ {{- if .Values.metrics.serviceMonitor.interval }}
+ interval: {{ .Values.metrics.serviceMonitor.interval }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+ scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.relabelings }}
+ relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+ {{- end }}
+ {{- if .Values.metrics.serviceMonitor.honorLabels }}
+ honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+ {{- end }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/postgresql/templates/read/statefulset.yaml b/charts/postgresql/templates/read/statefulset.yaml
new file mode 100644
index 0000000..9d28017
--- /dev/null
+++ b/charts/postgresql/templates/read/statefulset.yaml
@@ -0,0 +1,537 @@
+{{- if eq .Values.architecture "replication" }}
+{{- $customUser := include "postgresql.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ include "postgresql.readReplica.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/component: read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.readReplicas.labels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.labels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.readReplicas.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.readReplicas.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.readReplicas.replicaCount }}
+ serviceName: {{ include "postgresql.readReplica.svc.headless" . }}
+ {{- if .Values.readReplicas.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/component: read
+ template:
+ metadata:
+ name: {{ include "postgresql.readReplica.fullname" . }}
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/component: read
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.podLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if or (include "postgresql.readReplicas.createExtendedConfigmap" .) .Values.readReplicas.podAnnotations }}
+ annotations:
+ {{- if (include "postgresql.readReplicas.createExtendedConfigmap" .) }}
+ checksum/extended-configuration: {{ include (print $.Template.BasePath "/read/extended-configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.readReplicas.podAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ spec:
+ {{- if .Values.readReplicas.extraPodSpec }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "postgresql.serviceAccountName" . }}
+ {{- include "postgresql.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.readReplicas.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.affinity }}
+ affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.readReplicas.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.readReplicas.priorityClassName }}
+ priorityClassName: {{ .Values.readReplicas.priorityClassName }}
+ {{- end }}
+ {{- if .Values.readReplicas.schedulerName }}
+ schedulerName: {{ .Values.readReplicas.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.readReplicas.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if .Values.readReplicas.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.readReplicas.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ hostNetwork: {{ .Values.readReplicas.hostNetwork }}
+ hostIPC: {{ .Values.readReplicas.hostIPC }}
+ {{- if or (and .Values.tls.enabled (not .Values.volumePermissions.enabled)) (and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled)) .Values.readReplicas.initContainers }}
+ initContainers:
+ {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+ - name: copy-certs
+ image: {{ include "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.readReplicas.resources }}
+ resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+ {{- end }}
+ # We don't require a privileged container in this case
+ {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ chmod 600 {{ include "postgresql.tlsCertKey" . }}
+ volumeMounts:
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }}
+ - name: init-chmod-data
+ image: {{ include "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ {{- if .Values.readReplicas.resources }}
+ resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ {{- if .Values.readReplicas.persistence.enabled }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }}
+ {{- else }}
+ chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }}
+ {{- end }}
+ mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+ chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+ find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+ {{- else }}
+ xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ chmod -R 777 /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+ {{- else }}
+ chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+ {{- end }}
+ chmod 600 {{ include "postgresql.tlsCertKey" . }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{ if .Values.readReplicas.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+ {{- if .Values.readReplicas.persistence.subPath }}
+ subPath: {{ .Values.readReplicas.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ mountPath: /tmp/certs
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.initContainers }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.initContainers "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: postgresql
+ image: {{ include "postgresql.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: BITNAMI_DEBUG
+ value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+ - name: POSTGRESQL_PORT_NUMBER
+ value: {{ .Values.containerPorts.postgresql | quote }}
+ - name: POSTGRESQL_VOLUME_DIR
+ value: {{ .Values.readReplicas.persistence.mountPath | quote }}
+ {{- if .Values.readReplicas.persistence.mountPath }}
+ - name: PGDATA
+ value: {{ .Values.postgresqlDataDir | quote }}
+ {{- end }}
+ # Authentication
+ {{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.adminPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.adminPasswordKey" . }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.userPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.userPasswordKey" . }}
+ {{- end }}
+ # Replication
+ - name: POSTGRES_REPLICATION_MODE
+ value: "slave"
+ - name: POSTGRES_REPLICATION_USER
+ value: {{ .Values.auth.replicationUsername | quote }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: POSTGRES_REPLICATION_PASSWORD_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.replicationPasswordKey" .) }}
+ {{- else }}
+ - name: POSTGRES_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.replicationPasswordKey" . }}
+ {{- end }}
+ - name: POSTGRES_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ - name: POSTGRES_MASTER_HOST
+ value: {{ include "postgresql.primary.fullname" . }}
+ - name: POSTGRES_MASTER_PORT_NUMBER
+ value: {{ include "postgresql.service.port" . | quote }}
+ # TLS
+ - name: POSTGRESQL_ENABLE_TLS
+ value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+ {{- if .Values.tls.enabled }}
+ - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+ value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+ - name: POSTGRESQL_TLS_CERT_FILE
+ value: {{ include "postgresql.tlsCert" . }}
+ - name: POSTGRESQL_TLS_KEY_FILE
+ value: {{ include "postgresql.tlsCertKey" . }}
+ {{- if .Values.tls.certCAFilename }}
+ - name: POSTGRESQL_TLS_CA_FILE
+ value: {{ include "postgresql.tlsCACert" . }}
+ {{- end }}
+ {{- if .Values.tls.crlFilename }}
+ - name: POSTGRESQL_TLS_CRL_FILE
+ value: {{ include "postgresql.tlsCRL" . }}
+ {{- end }}
+ {{- end }}
+ # Audit
+ - name: POSTGRESQL_LOG_HOSTNAME
+ value: {{ .Values.audit.logHostname | quote }}
+ - name: POSTGRESQL_LOG_CONNECTIONS
+ value: {{ .Values.audit.logConnections | quote }}
+ - name: POSTGRESQL_LOG_DISCONNECTIONS
+ value: {{ .Values.audit.logDisconnections | quote }}
+ {{- if .Values.audit.logLinePrefix }}
+ - name: POSTGRESQL_LOG_LINE_PREFIX
+ value: {{ .Values.audit.logLinePrefix | quote }}
+ {{- end }}
+ {{- if .Values.audit.logTimezone }}
+ - name: POSTGRESQL_LOG_TIMEZONE
+ value: {{ .Values.audit.logTimezone | quote }}
+ {{- end }}
+ {{- if .Values.audit.pgAuditLog }}
+ - name: POSTGRESQL_PGAUDIT_LOG
+ value: {{ .Values.audit.pgAuditLog | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+ value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+ # Others
+ - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+ value: {{ .Values.audit.clientMinMessages | quote }}
+ - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+ value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+ {{- if .Values.readReplicas.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.readReplicas.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ .Values.readReplicas.extraEnvVarsCM }}
+ {{- end }}
+ {{- if .Values.readReplicas.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ .Values.readReplicas.extraEnvVarsSecret }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ containerPort: {{ .Values.containerPorts.postgresql }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.readReplicas.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ {{- if (include "postgresql.database" .) }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- else }}
+ - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.readReplicas.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }}
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - -e
+ {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.resources }}
+ resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.readReplicas.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.readReplicas.extendedConfiguration }}
+ - name: postgresql-extended-config
+ mountPath: {{ .Values.readReplicas.persistence.mountPath }}/conf/conf.d/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: postgresql-certificates
+ mountPath: /opt/bitnami/postgresql/certs
+ readOnly: true
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ mountPath: /dev/shm
+ {{- end }}
+ {{- if .Values.readReplicas.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+ {{- if .Values.readReplicas.persistence.subPath }}
+ subPath: {{ .Values.readReplicas.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ include "postgresql.metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ {{- if .Values.metrics.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.customMetrics }}
+ args: [ "--extend.query-path", "/conf/custom-metrics.yaml" ]
+ {{- end }}
+ env:
+ {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }}
+ - name: DATA_SOURCE_URI
+ value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.service.port" .)) $database }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: DATA_SOURCE_PASS_FILE
+ value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.userPasswordKey" .) }}
+ {{- else }}
+ - name: DATA_SOURCE_PASS
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "postgresql.secretName" . }}
+ key: {{ include "postgresql.userPasswordKey" . }}
+ {{- end }}
+ - name: DATA_SOURCE_USER
+ value: {{ default "postgres" $customUser | quote }}
+ {{- if .Values.metrics.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http-metrics
+ containerPort: {{ .Values.metrics.containerPorts.metrics }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.metrics.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- if .Values.metrics.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /
+ port: http-metrics
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ mountPath: /conf
+ readOnly: true
+ {{- end }}
+ {{- if .Values.metrics.resources }}
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }}
+ {{- end }}
+ volumes:
+ {{- if .Values.readReplicas.extendedConfiguration }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ include "postgresql.readReplicas.extendedConfigmapName" . }}
+ {{- end }}
+ {{- if .Values.auth.usePasswordFiles }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ include "postgresql.secretName" . }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: raw-certificates
+ secret:
+ secretName: {{ include "postgresql.tlsSecretName" . }}
+ - name: postgresql-certificates
+ emptyDir: {}
+ {{- end }}
+ {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+ - name: custom-metrics
+ configMap:
+ name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }}
+ {{- end }}
+ {{- if .Values.shmVolume.enabled }}
+ - name: dshm
+ emptyDir:
+ medium: Memory
+ {{- if .Values.shmVolume.sizeLimit }}
+ sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.readReplicas.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.readReplicas.persistence.enabled .Values.readReplicas.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ tpl .Values.readReplicas.persistence.existingClaim $ }}
+ {{- else if not .Values.readReplicas.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- if .Values.readReplicas.persistence.annotations }}
+ annotations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.annotations "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.readReplicas.persistence.labels }}
+ labels: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.labels "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.readReplicas.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if .Values.readReplicas.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.readReplicas.persistence.size | quote }}
+ {{- if .Values.readReplicas.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }}
+ {{- end -}}
+ {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/postgresql/templates/read/svc-headless.yaml b/charts/postgresql/templates/read/svc-headless.yaml
new file mode 100644
index 0000000..ee8f756
--- /dev/null
+++ b/charts/postgresql/templates/read/svc-headless.yaml
@@ -0,0 +1,39 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.readReplica.svc.headless" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: read
+ {{- if or .Values.readReplicas.service.headless.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.readReplicas.service.headless.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.service.headless.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+ # Use this annotation in addition to the actual publishNotReadyAddresses
+ # field below because the annotation will stop being respected soon but the
+ # field is broken in some versions of Kubernetes:
+ # https://github.com/kubernetes/kubernetes/issues/58662
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ type: ClusterIP
+ clusterIP: None
+ # We want all pods in the StatefulSet to have their addresses published for
+ # the sake of the other Postgresql pods even before they're ready, since they
+ # have to be able to talk to each other in order to become ready.
+ publishNotReadyAddresses: true
+ ports:
+ - name: tcp-postgresql
+ port: {{ include "postgresql.readReplica.service.port" . }}
+ targetPort: tcp-postgresql
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/postgresql/templates/read/svc.yaml b/charts/postgresql/templates/read/svc.yaml
new file mode 100644
index 0000000..c308c3f
--- /dev/null
+++ b/charts/postgresql/templates/read/svc.yaml
@@ -0,0 +1,55 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgresql.readReplica.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: read
+ {{- if or .Values.commonAnnotations .Values.readReplicas.service.annotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.readReplicas.service.annotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.readReplicas.service.type }}
+ {{- if or (eq .Values.readReplicas.service.type "LoadBalancer") (eq .Values.readReplicas.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.loadBalancerSourceRanges "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and .Values.readReplicas.service.clusterIP (eq .Values.readReplicas.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.readReplicas.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.readReplicas.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.readReplicas.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.readReplicas.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ ports:
+ - name: tcp-postgresql
+ port: {{ include "postgresql.readReplica.service.port" . }}
+ targetPort: tcp-postgresql
+ {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }}
+ nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }}
+ {{- else if eq .Values.readReplicas.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.readReplicas.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: read
+{{- end }}
diff --git a/charts/postgresql/templates/role.yaml b/charts/postgresql/templates/role.yaml
new file mode 100644
index 0000000..00f9222
--- /dev/null
+++ b/charts/postgresql/templates/role.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.rbac.create }}
+kind: Role
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+# yamllint disable rule:indentation
+rules:
+ {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+ {{- if and $pspAvailable .Values.psp.create }}
+ - apiGroups:
+ - 'policy'
+ resources:
+ - 'podsecuritypolicies'
+ verbs:
+ - 'use'
+ resourceNames:
+ - {{ include "common.names.fullname" . }}
+ {{- end }}
+ {{- if .Values.rbac.rules }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }}
+ {{- end }}
+# yamllint enable rule:indentation
+{{- end }}
diff --git a/charts/postgresql/templates/rolebinding.yaml b/charts/postgresql/templates/rolebinding.yaml
new file mode 100644
index 0000000..0311c0e
--- /dev/null
+++ b/charts/postgresql/templates/rolebinding.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+kind: RoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ kind: Role
+ name: {{ include "common.names.fullname" . }}
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "postgresql.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/charts/postgresql/templates/secrets.yaml b/charts/postgresql/templates/secrets.yaml
new file mode 100644
index 0000000..e3d6e7a
--- /dev/null
+++ b/charts/postgresql/templates/secrets.yaml
@@ -0,0 +1,103 @@
+{{- $host := include "postgresql.primary.fullname" . }}
+{{- $port := include "postgresql.service.port" . }}
+{{- $postgresPassword := "" }}
+{{- if .Values.auth.enablePostgresUser }}
+{{- $postgresPassword = include "common.secrets.passwords.manage" (dict "secret" (include "postgresql.secretName" .) "key" $.Values.auth.secretKeys.adminPasswordKey "providedValues" (list "global.postgresql.auth.postgresPassword" "auth.postgresPassword") "context" $) | trimAll "\"" | b64dec }}
+{{- end }}
+{{- $replicationPassword := "" }}
+{{- if eq .Values.architecture "replication" }}
+{{- $replicationPassword = include "common.secrets.passwords.manage" (dict "secret" (include "postgresql.secretName" .) "key" $.Values.auth.secretKeys.replicationPasswordKey "providedValues" (list "auth.replicationPassword") "context" $) | trimAll "\"" | b64dec }}
+{{- end }}
+{{- $ldapPassword := "" }}
+{{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }}
+{{- $ldapPassword = coalesce .Values.ldap.bind_password .Values.ldap.bindpw }}
+{{- end }}
+{{- $customUser := include "postgresql.username" . }}
+{{- $password := "" }}
+{{- if not (empty (include "postgresql.username" .)) }}
+{{- $password = include "common.secrets.passwords.manage" (dict "secret" (include "postgresql.secretName" .) "key" $.Values.auth.secretKeys.userPasswordKey "providedValues" (list "global.postgresql.auth.password" "auth.password") "context" $) | trimAll "\"" | b64dec }}
+{{- end }}
+{{- $database := include "postgresql.database" . }}
+{{- if (include "postgresql.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: Opaque
+data:
+ {{- if .Values.auth.enablePostgresUser }}
+ postgres-password: {{ $postgresPassword | b64enc | quote }}
+ {{- end }}
+ {{- if not (empty (include "postgresql.username" .)) }}
+ password: {{ $password | b64enc | quote }}
+ {{- end }}
+ {{- if eq .Values.architecture "replication" }}
+ replication-password: {{ $replicationPassword | b64enc | quote }}
+ {{- end }}
+ # We don't auto-generate LDAP password when it's not provided as we do for other passwords
+ {{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }}
+ ldap-password: {{ $ldapPassword | b64enc | quote }}
+ {{- end }}
+{{- end }}
+{{- if .Values.serviceBindings.enabled }}
+{{- if .Values.auth.enablePostgresUser }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}-svcbind-postgres
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: servicebinding.io/postgresql
+data:
+ provider: {{ print "bitnami" | b64enc | quote }}
+ type: {{ print "postgresql" | b64enc | quote }}
+ host: {{ $host | b64enc | quote }}
+ port: {{ $port | b64enc | quote }}
+ username: {{ print "postgres" | b64enc | quote }}
+ database: {{ print "postgres" | b64enc | quote }}
+ password: {{ $postgresPassword | b64enc | quote }}
+ uri: {{ printf "postgresql://postgres:%s@%s:%s/postgres" $postgresPassword $host $port | b64enc | quote }}
+{{- end }}
+{{- if and (not (empty $customUser)) (ne $customUser "postgres") }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.names.fullname" . }}-svcbind-custom-user
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: servicebinding.io/postgresql
+data:
+ provider: {{ print "bitnami" | b64enc | quote }}
+ type: {{ print "postgresql" | b64enc | quote }}
+ host: {{ $host | b64enc | quote }}
+ port: {{ $port | b64enc | quote }}
+ username: {{ $customUser | b64enc | quote }}
+ password: {{ $password | b64enc | quote }}
+ {{- if $database }}
+ database: {{ $database | b64enc | quote }}
+ {{- end }}
+ uri: {{ printf "postgresql://%s:%s@%s:%s/%s" $customUser $password $host $port $database | b64enc | quote }}
+{{- end }}
+{{- end }}
diff --git a/charts/postgresql/templates/serviceaccount.yaml b/charts/postgresql/templates/serviceaccount.yaml
new file mode 100644
index 0000000..179f8f2
--- /dev/null
+++ b/charts/postgresql/templates/serviceaccount.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "postgresql.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.serviceAccount.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/charts/postgresql/templates/tls-secrets.yaml b/charts/postgresql/templates/tls-secrets.yaml
new file mode 100644
index 0000000..482e298
--- /dev/null
+++ b/charts/postgresql/templates/tls-secrets.yaml
@@ -0,0 +1,28 @@
+{{- if (include "postgresql.createTlsSecret" . ) }}
+{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }}
+{{- $ca := genCA "postgresql-ca" 365 }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $primaryHeadlessServiceName := include "postgresql.primary.svc.headless" . }}
+{{- $readHeadlessServiceName := include "postgresql.readReplica.svc.headless" . }}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }}
+{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secretName }}
+ namespace: {{ .Release.Namespace | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+ tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+ ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}
diff --git a/charts/postgresql/values.schema.json b/charts/postgresql/values.schema.json
new file mode 100644
index 0000000..fc41483
--- /dev/null
+++ b/charts/postgresql/values.schema.json
@@ -0,0 +1,156 @@
+{
+ "$schema": "http://json-schema.org/schema#",
+ "type": "object",
+ "properties": {
+ "architecture": {
+ "type": "string",
+ "title": "PostgreSQL architecture",
+ "form": true,
+ "description": "Allowed values: `standalone` or `replication`"
+ },
+ "auth": {
+ "type": "object",
+ "title": "Authentication configuration",
+ "form": true,
+ "properties": {
+ "enablePostgresUser": {
+ "type": "boolean",
+ "title": "Enable \"postgres\" admin user",
+ "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user",
+ "form": true
+ },
+ "postgresPassword": {
+ "type": "string",
+ "title": "Password for the \"postgres\" admin user",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true
+ },
+ "database": {
+ "type": "string",
+ "title": "PostgreSQL custom database",
+ "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL",
+ "form": true
+ },
+ "username": {
+ "type": "string",
+ "title": "PostgreSQL custom user",
+ "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. This user only has permissions on the PostgreSQL custom database",
+ "form": true
+ },
+ "password": {
+ "type": "string",
+ "title": "Password for the custom user to create",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true
+ },
+ "replicationUsername": {
+ "type": "string",
+ "title": "PostgreSQL replication user",
+ "description": "Name of user used to manage replication.",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ },
+ "replicationPassword": {
+ "type": "string",
+ "title": "Password for PostgreSQL replication user",
+ "description": "Defaults to a random 10-character alphanumeric string if not set",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ }
+ }
+ },
+ "persistence": {
+ "type": "object",
+ "properties": {
+ "size": {
+ "type": "string",
+ "title": "Persistent Volume Size",
+ "form": true,
+ "render": "slider",
+ "sliderMin": 1,
+ "sliderMax": 100,
+ "sliderUnit": "Gi"
+ }
+ }
+ },
+ "resources": {
+ "type": "object",
+ "title": "Required Resources",
+ "description": "Configure resource requests",
+ "form": true,
+ "properties": {
+ "requests": {
+ "type": "object",
+ "properties": {
+ "memory": {
+ "type": "string",
+ "form": true,
+ "render": "slider",
+ "title": "Memory Request",
+ "sliderMin": 10,
+ "sliderMax": 2048,
+ "sliderUnit": "Mi"
+ },
+ "cpu": {
+ "type": "string",
+ "form": true,
+ "render": "slider",
+ "title": "CPU Request",
+ "sliderMin": 10,
+ "sliderMax": 2000,
+ "sliderUnit": "m"
+ }
+ }
+ }
+ }
+ },
+ "replication": {
+ "type": "object",
+ "form": true,
+ "title": "Replication Details",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Enable Replication",
+ "form": true
+ },
+ "readReplicas": {
+ "type": "integer",
+ "title": "read Replicas",
+ "form": true,
+ "hidden": {
+ "value": "standalone",
+ "path": "architecture"
+ }
+ }
+ }
+ },
+ "volumePermissions": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "form": true,
+ "title": "Enable Init Containers",
+ "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup"
+ }
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "title": "Configure metrics exporter",
+ "form": true
+ }
+ }
+ }
+ }
+}
diff --git a/charts/postgresql/values.yaml b/charts/postgresql/values.yaml
new file mode 100644
index 0000000..7f8b20b
--- /dev/null
+++ b/charts/postgresql/values.yaml
@@ -0,0 +1,1425 @@
+## @section Global parameters
+## Please note that this will override the parameters, including dependencies, configured to use the global value
+##
+global:
+ ## @param global.imageRegistry Global Docker image registry
+ ##
+ imageRegistry: ""
+ ## @param global.imagePullSecrets Global Docker registry secret names as an array
+ ## e.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+ ## @param global.storageClass Global StorageClass for Persistent Volume(s)
+ ##
+ storageClass: ""
+ postgresql:
+ ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`)
+ ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
+ ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`)
+ ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
+ ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`).
+ ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
+ ##
+ auth:
+ postgresPassword: ""
+ username: ""
+ password: ""
+ database: ""
+ existingSecret: ""
+ secretKeys:
+ adminPasswordKey: ""
+ userPasswordKey: ""
+ replicationPasswordKey: ""
+ ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
+ ##
+ service:
+ ports:
+ postgresql: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+ ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+ ##
+ enabled: false
+ ## @param diagnosticMode.command Command to override all containers in the statefulset
+ ##
+ command:
+ - sleep
+ ## @param diagnosticMode.args Args to override all containers in the statefulset
+ ##
+ args:
+ - infinity
+
+## @section PostgreSQL common parameters
+##
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+## @param image.registry PostgreSQL image registry
+## @param image.repository PostgreSQL image repository
+## @param image.tag PostgreSQL image tag (immutable tags are recommended)
+## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy PostgreSQL image pull policy
+## @param image.pullSecrets Specify image pull secrets
+## @param image.debug Specify if debug values should be set
+##
+image:
+ registry: docker.io
+ repository: bitnami/postgresql
+ tag: 15.3.0-debian-11-r7
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Set to true if you would like to see extra information on logs
+ ##
+ debug: false
+## Authentication parameters
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run
+##
+auth:
+ ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
+ ##
+ enablePostgresUser: true
+ ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided
+ ##
+ postgresPassword: ""
+ ## @param auth.username Name for a custom user to create
+ ##
+ username: ""
+ ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided
+ ##
+ password: ""
+ ## @param auth.database Name for a custom database to create
+ ##
+ database: ""
+ ## @param auth.replicationUsername Name of the replication user
+ ##
+ replicationUsername: repl_user
+ ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided
+ ##
+ replicationPassword: ""
+ ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case.
+ ##
+ existingSecret: ""
+ ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
+ ##
+ secretKeys:
+ adminPasswordKey: postgres-password
+ userPasswordKey: password
+ replicationPasswordKey: replication-password
+ ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable
+ ##
+ usePasswordFiles: false
+## @param architecture PostgreSQL architecture (`standalone` or `replication`)
+##
+architecture: standalone
+## Replication configuration
+## Ignored if `architecture` is `standalone`
+##
+replication:
+ ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off`
+ ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`.
+ ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+ ##
+ synchronousCommit: "off"
+ numSynchronousReplicas: 0
+ ## @param replication.applicationName Cluster application name. Useful for advanced replication settings
+ ##
+ applicationName: my_application
+## @param containerPorts.postgresql PostgreSQL container port
+##
+containerPorts:
+ postgresql: 5432
+## Audit settings
+## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing
+## @param audit.logHostname Log client hostnames
+## @param audit.logConnections Add client log-in operations to the log file
+## @param audit.logDisconnections Add client log-outs operations to the log file
+## @param audit.pgAuditLog Add operations to log using the pgAudit extension
+## @param audit.pgAuditLogCatalog Log catalog using pgAudit
+## @param audit.clientMinMessages Message log level to share with the user
+## @param audit.logLinePrefix Template for log line prefix (default if not set)
+## @param audit.logTimezone Timezone for the log timestamps
+##
+audit:
+ logHostname: false
+ logConnections: false
+ logDisconnections: false
+ pgAuditLog: ""
+ pgAuditLogCatalog: "off"
+ clientMinMessages: error
+ logLinePrefix: ""
+ logTimezone: ""
+## LDAP configuration
+## @param ldap.enabled Enable LDAP support
+## DEPRECATED ldap.url It will be removed in a future release, please use 'ldap.uri' instead
+## @param ldap.server IP address or name of the LDAP server.
+## @param ldap.port Port number on the LDAP server to connect to
+## @param ldap.prefix String to prepend to the user name when forming the DN to bind
+## @param ldap.suffix String to append to the user name when forming the DN to bind
+## DEPRECATED ldap.baseDN It will be removed in a future release, please use 'ldap.basedn' instead
+## DEPRECATED ldap.bindDN It will be removed in a future release, please use 'ldap.binddn' instead
+## DEPRECATED ldap.bind_password It will be removed in a future release, please use 'ldap.bindpw' instead
+## @param ldap.basedn Root DN to begin the search for the user in
+## @param ldap.binddn DN of user to bind to LDAP
+## @param ldap.bindpw Password for the user to bind to LDAP
+## DEPRECATED ldap.search_attr It will be removed in a future release, please use 'ldap.searchAttribute' instead
+## DEPRECATED ldap.search_filter It will be removed in a future release, please use 'ldap.searchFilter' instead
+## @param ldap.searchAttribute Attribute to match against the user name in the search
+## @param ldap.searchFilter The search filter to use when doing search+bind authentication
+## @param ldap.scheme Set to `ldaps` to use LDAPS
+## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead
+## @param ldap.tls.enabled Set to true to enable TLS encryption
+##
+ldap:
+ enabled: false
+ server: ""
+ port: ""
+ prefix: ""
+ suffix: ""
+ basedn: ""
+ binddn: ""
+ bindpw: ""
+ searchAttribute: ""
+ searchFilter: ""
+ scheme: ""
+ tls:
+ enabled: false
+ ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored.
+ ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html
+ ##
+ uri: ""
+## @param postgresqlDataDir PostgreSQL data dir folder
+##
+postgresqlDataDir: /bitnami/postgresql/data
+## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list)
+##
+postgresqlSharedPreloadLibraries: "pgaudit"
+## Start PostgreSQL pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M`
+## ref: https://github.com/docker-library/postgres/issues/416
+## ref: https://github.com/containerd/containerd/issues/3654
+##
+shmVolume:
+ ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s)
+ ##
+ enabled: true
+ ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs
+ ## Note: the size of the tmpfs counts against container's memory limit
+ ## e.g:
+ ## sizeLimit: 1Gi
+ ##
+ sizeLimit: ""
+## TLS configuration
+##
+tls:
+ ## @param tls.enabled Enable TLS traffic support
+ ##
+ enabled: false
+ ## @param tls.autoGenerated Generate automatically self-signed TLS certificates
+ ##
+ autoGenerated: false
+ ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's
+ ##
+ preferServerCiphers: true
+ ## @param tls.certificatesSecret Name of an existing secret that contains the certificates
+ ##
+ certificatesSecret: ""
+ ## @param tls.certFilename Certificate filename
+ ##
+ certFilename: ""
+ ## @param tls.certKeyFilename Certificate key filename
+ ##
+ certKeyFilename: ""
+ ## @param tls.certCAFilename CA Certificate filename
+ ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate
+ ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html
+ ##
+ certCAFilename: ""
+ ## @param tls.crlFilename File containing a Certificate Revocation List
+ ##
+ crlFilename: ""
+
+## @section PostgreSQL Primary parameters
+##
+primary:
+ ## @param primary.name Name of the primary database (eg primary, master, leader, ...)
+ ##
+ name: primary
+ ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap
+ ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+ ##
+ configuration: ""
+ ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration
+ ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
+ ## e.g:
+ ## pgHbaConfiguration: |-
+ ## local all all trust
+ ## host all all localhost trust
+ ## host mydatabase mysuser 192.168.0.0/24 md5
+ ##
+ pgHbaConfiguration: ""
+ ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration
+ ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored
+ ##
+ existingConfigmap: ""
+ ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+ ##
+ extendedConfiguration: ""
+ ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration
+ ## NOTE: `primary.extendedConfiguration` will be ignored
+ ##
+ existingExtendedConfigmap: ""
+ ## Initdb configuration
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments
+ ##
+ initdb:
+ ## @param primary.initdb.args PostgreSQL initdb extra arguments
+ ##
+ args: ""
+ ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log
+ ##
+ postgresqlWalDir: ""
+ ## @param primary.initdb.scripts Dictionary of initdb scripts
+ ## Specify dictionary of scripts to be run at first boot
+ ## e.g:
+ ## scripts:
+ ## my_init_script.sh: |
+ ## #!/bin/sh
+ ## echo "Do something."
+ ##
+ scripts: {}
+ ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot
+ ## NOTE: This will override `primary.initdb.scripts`
+ ##
+ scriptsConfigMap: ""
+ ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information)
+ ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap`
+ ##
+ scriptsSecret: ""
+ ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts
+ ##
+ user: ""
+ ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts
+ ##
+ password: ""
+ ## Configure current cluster's primary server to be the standby server in other cluster.
+ ## This will allow cross cluster replication and provide cross cluster high availability.
+ ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled.
+ ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not
+ ## @param primary.standby.primaryHost The Host of replication primary in the other cluster
+ ## @param primary.standby.primaryPort The Port of replication primary in the other cluster
+ ##
+ standby:
+ enabled: false
+ primaryHost: ""
+ primaryPort: ""
+ ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param primary.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param primary.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers
+ ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers
+ ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers
+ ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param primary.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param primary.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## PostgreSQL Primary resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers
+ ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers
+ ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers
+ ##
+ resources:
+ limits: {}
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ ## Pod Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param primary.podSecurityContext.enabled Enable security context
+ ## @param primary.podSecurityContext.fsGroup Group ID for the pod
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param primary.containerSecurityContext.enabled Enable container security context
+ ## @param primary.containerSecurityContext.runAsUser User ID for the container
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param primary.hostAliases PostgreSQL primary pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary)
+ ##
+ hostNetwork: false
+ ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary)
+ ##
+ hostIPC: false
+ ## @param primary.labels Map of labels to add to the statefulset (postgresql primary)
+ ##
+ labels: {}
+ ## @param primary.annotations Annotations for PostgreSQL primary pods
+ ##
+ annotations: {}
+ ## @param primary.podLabels Map of labels to add to the pods (postgresql primary)
+ ##
+ podLabels: {}
+ ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary)
+ ##
+ podAnnotations: {}
+ ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## PostgreSQL Primary node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param primary.affinity Affinity for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary)
+ ##
+ priorityClassName: ""
+ ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type
+ ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate: {}
+ ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s)
+ ##
+ extraVolumes: []
+ ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s)
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s)
+ ## Example
+ ##
+ ## initContainers:
+ ## - name: do-something
+ ## image: busybox
+ ## command: ['do', 'something']
+ ##
+ initContainers: []
+ ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s)
+ ##
+ extraPodSpec: {}
+ ## PostgreSQL Primary service configuration
+ ##
+ service:
+ ## @param primary.service.type Kubernetes Service type
+ ##
+ type: ClusterIP
+ ## @param primary.service.ports.postgresql PostgreSQL service port
+ ##
+ ports:
+ postgresql: 5432
+ ## Node ports to expose
+ ## NOTE: choose port between <30000-32767>
+ ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ nodePorts:
+ postgresql: ""
+ ## @param primary.service.clusterIP Static clusterIP or None for headless services
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param primary.service.annotations Annotations for PostgreSQL primary service
+ ##
+ annotations: {}
+ ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param primary.service.externalTrafficPolicy Enable client source IP preservation
+ ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service
+ ##
+ extraPorts: []
+ ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## Headless service properties
+ ##
+ headless:
+ ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service
+ ##
+ annotations: {}
+ ## PostgreSQL Primary persistence configuration
+ ##
+ persistence:
+ ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC
+ ##
+ enabled: true
+ ## @param primary.persistence.existingClaim Name of an existing PVC to use
+ ##
+ existingClaim: ""
+ ## @param primary.persistence.mountPath The path the volume will be mounted at
+ ## Note: useful when using custom PostgreSQL images
+ ##
+ mountPath: /bitnami/postgresql
+ ## @param primary.persistence.subPath The subdirectory of the volume to mount to
+ ## Useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
+ ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume
+ ##
+ size: 8Gi
+ ## @param primary.persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param primary.persistence.labels Labels for the PVC
+ ##
+ labels: {}
+ ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param primary.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+
+## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
+##
+readReplicas:
+ ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...)
+ ##
+ name: read
+ ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas
+ ##
+ replicaCount: 1
+ ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration)
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+ ##
+ extendedConfiguration: ""
+ ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param readReplicas.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param readReplicas.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers
+ ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers
+ ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers
+ ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## PostgreSQL read only resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers
+ ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers
+ ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers
+ ##
+ resources:
+ limits: {}
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ ## Pod Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param readReplicas.podSecurityContext.enabled Enable security context
+ ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param readReplicas.containerSecurityContext.enabled Enable container security context
+ ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only)
+ ##
+ hostNetwork: false
+ ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only)
+ ##
+ hostIPC: false
+ ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only)
+ ##
+ labels: {}
+ ## @param readReplicas.annotations Annotations for PostgreSQL read only pods
+ ##
+ annotations: {}
+ ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only)
+ ##
+ podLabels: {}
+ ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only)
+ ##
+ podAnnotations: {}
+ ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## PostgreSQL read only node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only)
+ ##
+ priorityClassName: ""
+ ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type
+ ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate: {}
+ ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s)
+ ##
+ extraVolumes: []
+ ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s)
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s)
+ ## Example
+ ##
+ ## initContainers:
+ ## - name: do-something
+ ## image: busybox
+ ## command: ['do', 'something']
+ ##
+ initContainers: []
+ ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s)
+ ##
+ extraPodSpec: {}
+ ## PostgreSQL read only service configuration
+ ##
+ service:
+ ## @param readReplicas.service.type Kubernetes Service type
+ ##
+ type: ClusterIP
+ ## @param readReplicas.service.ports.postgresql PostgreSQL service port
+ ##
+ ports:
+ postgresql: 5432
+ ## Node ports to expose
+ ## NOTE: choose port between <30000-32767>
+ ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ nodePorts:
+ postgresql: ""
+ ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service
+ ##
+ annotations: {}
+ ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation
+ ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service
+ ##
+ extraPorts: []
+ ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+ ## Headless service properties
+ ##
+ headless:
+ ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service
+ ##
+ annotations: {}
+ ## PostgreSQL read only persistence configuration
+ ##
+ persistence:
+ ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC
+ ##
+ enabled: true
+ ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use
+ ##
+ existingClaim: ""
+ ## @param readReplicas.persistence.mountPath The path the volume will be mounted at
+ ## Note: useful when using custom PostgreSQL images
+ ##
+ mountPath: /bitnami/postgresql
+ ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to
+ ## Useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
+ ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume
+ ##
+ size: 8Gi
+ ## @param readReplicas.persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param readReplicas.persistence.labels Labels for the PVC
+ ##
+ labels: {}
+ ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param readReplicas.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+
+## @section NetworkPolicy parameters
+##
+
+## Add networkpolicies
+##
+networkPolicy:
+ ## @param networkPolicy.enabled Enable network policies
+ ##
+ enabled: false
+ ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus)
+ ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace.
+ ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods.
+ ##
+ metrics:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: monitoring
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: monitoring
+ ##
+ podSelector: {}
+ ## Ingress Rules
+ ##
+ ingressRules:
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin.
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s).
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s).
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules Custom network policy for the PostgreSQL primary node.
+ ##
+ primaryAccessOnlyFrom:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: ingress
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: access
+ ##
+ podSelector: {}
+ ## custom ingress rules
+ ## e.g:
+ ## customRules:
+ ## - from:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ ##
+ customRules: []
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin.
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s).
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s).
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules Custom network policy for the PostgreSQL read-only nodes.
+ ##
+ readReplicasAccessOnlyFrom:
+ enabled: false
+ ## e.g:
+ ## namespaceSelector:
+ ## label: ingress
+ ##
+ namespaceSelector: {}
+ ## e.g:
+ ## podSelector:
+ ## label: access
+ ##
+ podSelector: {}
+ ## custom ingress rules
+ ## e.g:
+ ## customRules:
+ ## - from:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ ##
+ customRules: []
+ ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53).
+ ## @param networkPolicy.egressRules.customRules Custom network policy rule
+ ##
+ egressRules:
+ # Deny connections to external. This is not compatible with an external database.
+ denyConnectionsToExternal: false
+ ## Additional custom egress rules
+ ## e.g:
+ ## customRules:
+ ## - to:
+ ## - namespaceSelector:
+ ## matchLabels:
+ ## label: example
+ ##
+ customRules: []
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+ ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+ ##
+ enabled: false
+ ## @param volumePermissions.image.registry Init container volume-permissions image registry
+ ## @param volumePermissions.image.repository Init container volume-permissions image repository
+ ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+ ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+ ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: 11-debian-11-r120
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Init container resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+ ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Init container' Security Context
+ ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+ ## and not the below volumePermissions.containerSecurityContext.runAsUser
+ ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+ ##
+ containerSecurityContext:
+ runAsUser: 0
+
+## @section Other Parameters
+##
+
+## @param serviceBindings.enabled Create secret for service binding (Experimental)
+## Ref: https://servicebinding.io/service-provider/
+##
+serviceBindings:
+ enabled: false
+
+## Service account for PostgreSQL to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+ ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod
+ ##
+ create: false
+ ## @param serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+ ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+ ##
+ automountServiceAccountToken: true
+ ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+ ##
+ annotations: {}
+## Creates role for ServiceAccount
+## @param rbac.create Create Role and RoleBinding (required for PSP to work)
+##
+rbac:
+ create: false
+ ## @param rbac.rules Custom RBAC rules to set
+ ## e.g:
+ ## rules:
+ ## - apiGroups:
+ ## - ""
+ ## resources:
+ ## - pods
+ ## verbs:
+ ## - get
+ ## - list
+ ##
+ rules: []
+## Pod Security Policy
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
+##
+psp:
+ create: false
+
+## @section Metrics Parameters
+##
+
+metrics:
+ ## @param metrics.enabled Start a prometheus exporter
+ ##
+ enabled: false
+ ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry
+ ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository
+ ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended)
+ ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy
+ ## @param metrics.image.pullSecrets Specify image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/postgres-exporter
+ tag: 0.12.0-debian-11-r91
+ digest: ""
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## @param metrics.customMetrics Define additional custom metrics
+ ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
+ ## customMetrics:
+ ## pg_database:
+ ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
+ ## metrics:
+ ## - name:
+ ## usage: "LABEL"
+ ## description: "Name of the database"
+ ## - size_bytes:
+ ## usage: "GAUGE"
+ ## description: "Size of the database in bytes"
+ ##
+ customMetrics: {}
+ ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter
+ ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables
+ ## For example:
+ ## extraEnvVars:
+ ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS
+ ## value: "true"
+ ##
+ extraEnvVars: []
+ ## PostgreSQL Prometheus exporter containers' Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context
+ ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser
+ ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsNonRoot: true
+ ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+ ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers
+ ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ failureThreshold: 15
+ successThreshold: 1
+ ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port
+ ##
+ containerPorts:
+ metrics: 9187
+ ## PostgreSQL Prometheus exporter resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container
+ ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Service configuration
+ ##
+ service:
+ ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port
+ ##
+ ports:
+ metrics: 9187
+ ## @param metrics.service.clusterIP Static clusterIP or None for headless services
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
+ ##
+ clusterIP: ""
+ ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/user-guide/services/
+ ##
+ sessionAffinity: None
+ ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
+ ##
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}"
+ ## Prometheus Operator ServiceMonitor configuration
+ ##
+ serviceMonitor:
+ ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+ ##
+ enabled: false
+ ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+ ##
+ namespace: ""
+ ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ interval: ""
+ ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ scrapeTimeout: ""
+ ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+ ##
+ labels: {}
+ ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+ ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ selector: {}
+ ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+ ##
+ relabelings: []
+ ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+ ##
+ metricRelabelings: []
+ ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
+ ##
+ jobLabel: ""
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ ##
+ prometheusRule:
+ ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+ ##
+ enabled: false
+ ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+ ##
+ namespace: ""
+ ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+ ##
+ labels: {}
+ ## @param metrics.prometheusRule.rules PrometheusRule definitions
+ ## Make sure to constrain the rules to the current postgresql service.
+ ## rules:
+ ## - alert: HugeReplicationLag
+ ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1
+ ## for: 1m
+ ## labels:
+ ## severity: critical
+ ## annotations:
+ ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+ ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+ ##
+ rules: []
diff --git a/charts/qbittorrent/.helmignore b/charts/qbittorrent/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/qbittorrent/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/qbittorrent/Chart.yaml b/charts/qbittorrent/Chart.yaml
new file mode 100644
index 0000000..9124fba
--- /dev/null
+++ b/charts/qbittorrent/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: qbittorrent
+description: A Helm chart to run qBittorrent server on PCloud
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/qbittorrent/templates/deploy.yaml b/charts/qbittorrent/templates/deploy.yaml
new file mode 100644
index 0000000..562139e
--- /dev/null
+++ b/charts/qbittorrent/templates/deploy.yaml
@@ -0,0 +1,90 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: torrent
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: torrent
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+ - name: bittorrent-tcp
+ port: {{ .Values.bittorrent.port }}
+ targetPort: bittorrent
+ protocol: TCP
+ - name: bittorrent-udp
+ port: {{ .Values.bittorrent.port }}
+ targetPort: bittorrent
+ protocol: UDP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+spec:
+ ingressClassName: {{ .Values.ingress.className }}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.domain }}
+ rules:
+ - host: {{ .Values.ingress.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: torrent
+ port:
+ name: http
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: torrent
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: torrent
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: torrent
+ spec:
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ containers:
+ - name: torrent
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.webui.port }}
+ protocol: TCP
+ - name: bittorrent
+ containerPort: {{ .Values.bittorrent.port }}
+ volumeMounts:
+ - name: data
+ mountPath: /downloads
+ readOnly: false
+ # nodeSelector:
+ # "kubernetes.io/hostname": rpi05
+ # command:
+ # - torrent
+ # - --port=8080
+ # resources:
+ # requests:
+ # memory: "10Mi"
+ # cpu: "10m"
+ # limits:
+ # memory: "20Mi"
+ # cpu: "100m"
diff --git a/charts/qbittorrent/templates/pvc.yaml b/charts/qbittorrent/templates/pvc.yaml
new file mode 100644
index 0000000..77f42c3
--- /dev/null
+++ b/charts/qbittorrent/templates/pvc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: data
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.size }}
diff --git a/charts/qbittorrent/templates/samba-creds.yaml b/charts/qbittorrent/templates/samba-creds.yaml
new file mode 100644
index 0000000..48d07d8
--- /dev/null
+++ b/charts/qbittorrent/templates/samba-creds.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: qbittorrent-samba-creds
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    kubed.appscode.com/sync: "pcloud-instance-id={{ .Values.pcloudInstanceId }}"
+type: Opaque
+data:
+ username: {{ .Values.samba.creds.username | b64enc }}
+ password: {{ .Values.samba.creds.password | b64enc }}
diff --git a/charts/qbittorrent/templates/samba.yaml b/charts/qbittorrent/templates/samba.yaml
new file mode 100644
index 0000000..e34b8e4
--- /dev/null
+++ b/charts/qbittorrent/templates/samba.yaml
@@ -0,0 +1,80 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: samba
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: samba
+spec:
+  type: LoadBalancer
+ selector:
+ app: samba
+ ports:
+ - port: 445
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: samba
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: samba
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: samba
+ spec:
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - torrent
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ containers:
+ - name: samba
+ image: {{ .Values.samba.image.repository }}:{{ .Values.samba.image.tag }}
+ imagePullPolicy: {{ .Values.samba.image.pullPolicy }}
+ env:
+ - name: PERMISSIONS
+ value: "0777"
+ - name: USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: qbittorrent-samba-creds
+ key: username
+ - name: PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: qbittorrent-samba-creds
+ key: password
+ ports:
+ - containerPort: 139
+ - containerPort: 445
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ readOnly: false
+ args:
+ - -u
+ - $(USERNAME);$(PASSWORD)
+ - -s
+ - share;/data/;yes;no;no;all;none
+ - -p
+ # resources:
+ # requests:
+ # memory: "10Mi"
+ # cpu: "10m"
+ # limits:
+ # memory: "20Mi"
+ # cpu: "100m"
diff --git a/charts/qbittorrent/values.yaml b/charts/qbittorrent/values.yaml
new file mode 100644
index 0000000..3446d5c
--- /dev/null
+++ b/charts/qbittorrent/values.yaml
@@ -0,0 +1,22 @@
+pcloudInstanceId: example
+image:
+ repository: lscr.io/linuxserver/qbittorrent
+ tag: 4.5.3
+ pullPolicy: IfNotPresent
+ingress:
+ className: example-private
+  domain: qbittorrent.p.example.com
+webui:
+ port: 8080
+bittorrent:
+ port: 6881
+storage:
+ size: 100Gi
+samba:
+ image:
+ repository: dperson/samba
+ tag: latest
+ pullPolicy: IfNotPresent
+ creds:
+ username: foo
+ password: bar
diff --git a/charts/resource-renderer-controller/Chart.yaml b/charts/resource-renderer-controller/Chart.yaml
new file mode 100644
index 0000000..6d24782
--- /dev/null
+++ b/charts/resource-renderer-controller/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: resource-renderer-controller
+description: A Helm chart for resource-renderer-controller
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/resource-renderer-controller/templates/crds.yaml b/charts/resource-renderer-controller/templates/crds.yaml
new file mode 100644
index 0000000..7035603
--- /dev/null
+++ b/charts/resource-renderer-controller/templates/crds.yaml
@@ -0,0 +1,54 @@
+{{ if .Values.installCRDs }}
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.9.2
+ creationTimestamp: null
+ name: resourcerenderers.dodo.cloud.dodo.cloud
+spec:
+ group: dodo.cloud.dodo.cloud
+ names:
+ kind: ResourceRenderer
+ listKind: ResourceRendererList
+ plural: resourcerenderers
+ singular: resourcerenderer
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: ResourceRenderer is the Schema for the resourcerenderers API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ResourceRendererSpec defines the desired state of ResourceRenderer
+ properties:
+ resourceTemplate:
+ type: string
+ secretName:
+                description: Name of the Secret whose data supplies values for rendering the resource template
+ type: string
+ secretNamespace:
+ type: string
+ type: object
+ status:
+ description: ResourceRendererStatus defines the observed state of ResourceRenderer
+ properties:
+ ready:
+                description: Indicates whether the rendered resource has been successfully created
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+{{ end }}
diff --git a/charts/resource-renderer-controller/templates/install.yaml b/charts/resource-renderer-controller/templates/install.yaml
new file mode 100644
index 0000000..64af9de
--- /dev/null
+++ b/charts/resource-renderer-controller/templates/install.yaml
@@ -0,0 +1,294 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: resource-renderer-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: resource-renderer-leader-election-role
+ namespace: {{ .Release.Namespace }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: resource-renderer-manager-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - dodo.cloud.dodo.cloud
+ resources:
+ - resourcerenderers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - dodo.cloud.dodo.cloud
+ resources:
+ - resourcerenderers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - dodo.cloud.dodo.cloud
+ resources:
+ - resourcerenderers/status
+ verbs:
+ - get
+ - patch
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: resource-renderer-metrics-reader
+rules:
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: resource-renderer-proxy-role
+rules:
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: resource-renderer-leader-election-rolebinding
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: resource-renderer-leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: resource-renderer-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: resource-renderer-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: resource-renderer-manager-role
+subjects:
+- kind: ServiceAccount
+ name: resource-renderer-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: resource-renderer-proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: resource-renderer-proxy-role
+subjects:
+- kind: ServiceAccount
+ name: resource-renderer-controller-manager
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+data:
+ controller_manager_config.yaml: |
+ apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
+ kind: ControllerManagerConfig
+ health:
+ healthProbeBindAddress: :8081
+ metrics:
+ bindAddress: 127.0.0.1:8080
+ webhook:
+ port: 9443
+ leaderElection:
+ leaderElect: true
+ resourceName: 798a733c.dodo.cloud
+ # leaderElectionReleaseOnCancel defines if the leader should step down volume
+ # when the Manager ends. This requires the binary to immediately end when the
+ # Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
+ # speeds up voluntary leader transitions as the new leader don't have to wait
+ # LeaseDuration time first.
+ # In the default scaffold provided, the program ends immediately after
+ # the manager stops, so would be fine to enable this option. However,
+ # if you are doing or is intended to do any operation such as perform cleanups
+ # after the manager stops then its usage might be unsafe.
+ # leaderElectionReleaseOnCancel: true
+kind: ConfigMap
+metadata:
+ name: resource-renderer-manager-config
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ control-plane: controller-manager
+ name: resource-renderer-controller-manager-metrics-service
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - name: https
+ port: 8443
+ protocol: TCP
+ targetPort: https
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ control-plane: controller-manager
+ name: resource-renderer-controller-manager
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ template:
+ metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
+ labels:
+ control-plane: controller-manager
+ spec:
+ containers:
+ - args:
+ - --secure-listen-address=0.0.0.0:8443
+ - --upstream=http://127.0.0.1:8080/
+ - --logtostderr=true
+ - --v=0
+ image: {{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}
+ name: kube-rbac-proxy
+ ports:
+ - containerPort: 8443
+ name: https
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 500m
+ memory: 128Mi
+ requests:
+ cpu: 5m
+ memory: 64Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ - args:
+ - --health-probe-bind-address=:8081
+ - --metrics-bind-address=127.0.0.1:8080
+ - --leader-elect
+ command:
+ - /manager
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ name: manager
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources:
+ limits:
+ cpu: 500m
+ memory: 128Mi
+ requests:
+ cpu: 10m
+ memory: 64Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: resource-renderer-controller-manager
+ terminationGracePeriodSeconds: 10
diff --git a/charts/resource-renderer-controller/values.yaml b/charts/resource-renderer-controller/values.yaml
new file mode 100644
index 0000000..d424d0a
--- /dev/null
+++ b/charts/resource-renderer-controller/values.yaml
@@ -0,0 +1,10 @@
+image:
+ repository: "giolekva/resource-renderer-controller"
+ tag: latest
+ pullPolicy: Always
+kubeRBACProxy:
+ image:
+ repository: "gcr.io/kubebuilder/kube-rbac-proxy"
+ tag: v0.13.0
+ pullPolicy: IfNotPresent
+installCRDs: false
diff --git a/charts/resource-renderer/.helmignore b/charts/resource-renderer/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/resource-renderer/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/resource-renderer/Chart.yaml b/charts/resource-renderer/Chart.yaml
new file mode 100644
index 0000000..1360ef4
--- /dev/null
+++ b/charts/resource-renderer/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: resource-renderer
+description: A Helm chart to configure resource-renderer
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/resource-renderer/templates/install.yaml b/charts/resource-renderer/templates/install.yaml
new file mode 100644
index 0000000..9c7495d
--- /dev/null
+++ b/charts/resource-renderer/templates/install.yaml
@@ -0,0 +1,8 @@
+apiVersion: dodo.cloud.dodo.cloud/v1
+kind: ResourceRenderer
+metadata:
+ name: {{ .Values.name }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ .Values.secretName }}
+  resourceTemplate: {{ toYaml .Values.resourceTemplate | nindent 4 }}
diff --git a/charts/resource-renderer/values.yaml b/charts/resource-renderer/values.yaml
new file mode 100644
index 0000000..6fbb1af
--- /dev/null
+++ b/charts/resource-renderer/values.yaml
@@ -0,0 +1,3 @@
+name: render-configmap
+secretName: foo
+resourceTemplate: ""
diff --git a/charts/rpuppy/.helmignore b/charts/rpuppy/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/rpuppy/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/rpuppy/Chart.yaml b/charts/rpuppy/Chart.yaml
new file mode 100644
index 0000000..2895802
--- /dev/null
+++ b/charts/rpuppy/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: rpuppy
+description: A Helm chart for rpuppy
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/rpuppy/templates/install.yaml b/charts/rpuppy/templates/install.yaml
new file mode 100644
index 0000000..1d70bd5
--- /dev/null
+++ b/charts/rpuppy/templates/install.yaml
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: rpuppy
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: rpuppy
+ ports:
+ - name: {{ .Values.portName }}
+ port: 80
+ targetPort: {{ .Values.portName }}
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rpuppy
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: rpuppy
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: rpuppy
+ spec:
+ containers:
+ - name: rpuppy
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: {{ .Values.portName }}
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - /usr/bin/rpuppy
+ - --port=8080
+ resources:
+ requests:
+ memory: "10Mi"
+ cpu: "10m"
+ limits:
+ memory: "20Mi"
+ cpu: "100m"
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
diff --git a/charts/rpuppy/values.yaml b/charts/rpuppy/values.yaml
new file mode 100644
index 0000000..0830d1e
--- /dev/null
+++ b/charts/rpuppy/values.yaml
@@ -0,0 +1,5 @@
+image:
+ repository: giolekva/rpuppy
+ tag: latest
+ pullPolicy: Always
+portName: http
diff --git a/charts/service/.helmignore b/charts/service/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/service/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/service/Chart.yaml b/charts/service/Chart.yaml
new file mode 100644
index 0000000..6141c00
--- /dev/null
+++ b/charts/service/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: service
+description: A Helm chart for Kubernetes service definition
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/service/templates/install.yaml b/charts/service/templates/install.yaml
new file mode 100644
index 0000000..1871f44
--- /dev/null
+++ b/charts/service/templates/install.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.name }}
+ namespace: {{ .Release.Namespace }}
+  {{- if .Values.annotations }}
+ annotations:
+ {{- toYaml .Values.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.type }}
+ selector:
+ {{- toYaml .Values.selector | nindent 4 }}
+ ports:
+ {{- toYaml .Values.ports | nindent 4 }}
diff --git a/charts/service/values.yaml b/charts/service/values.yaml
new file mode 100644
index 0000000..b476b59
--- /dev/null
+++ b/charts/service/values.yaml
@@ -0,0 +1,5 @@
+name: "example"
+type: "ClusterIP"
+ports: {}
+selector: {}
+annotations: {}
diff --git a/charts/soft-serve/.helmignore b/charts/soft-serve/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/soft-serve/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/soft-serve/Chart.yaml b/charts/soft-serve/Chart.yaml
new file mode 100644
index 0000000..8afa8ca
--- /dev/null
+++ b/charts/soft-serve/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: soft-serve
+description: A Helm chart for git server to store PCloud configuration
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/soft-serve/templates/ingress.yaml b/charts/soft-serve/templates/ingress.yaml
new file mode 100644
index 0000000..490a5b3
--- /dev/null
+++ b/charts/soft-serve/templates/ingress.yaml
@@ -0,0 +1,31 @@
+{{ if .Values.ingress.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.ingress.certificateIssuer }}
+ annotations:
+ acme.cert-manager.io/http01-edit-in-place: "true"
+ cert-manager.io/cluster-issuer: {{ .Values.ingress.certificateIssuer }}
+ {{- end }}
+spec:
+ ingressClassName: {{ .Values.ingress.ingressClassName }}
+ {{- if .Values.ingress.certificateIssuer }}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.domain }}
+ secretName: cert-soft-serve
+ {{- end }}
+ rules:
+ - host: {{ .Values.ingress.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: soft-serve
+ port:
+ name: http
+{{ end }}
diff --git a/charts/soft-serve/templates/keys.yaml b/charts/soft-serve/templates/keys.yaml
new file mode 100644
index 0000000..6ba4849
--- /dev/null
+++ b/charts/soft-serve/templates/keys.yaml
@@ -0,0 +1,10 @@
+{{ if and .Values.privateKey .Values.publicKey }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: keys
+ namespace: {{ .Release.Namespace }}
+data:
+  key: {{ toYaml .Values.privateKey | nindent 4 }}
+  key.pub: {{ toYaml .Values.publicKey | nindent 4 }}
+{{ end }}
diff --git a/charts/soft-serve/templates/service-transport.yaml b/charts/soft-serve/templates/service-transport.yaml
new file mode 100644
index 0000000..969fbbe
--- /dev/null
+++ b/charts/soft-serve/templates/service-transport.yaml
@@ -0,0 +1,13 @@
+{{ if .Values.ingress.enabled }}
+apiVersion: transport.dodo.cloud/v1
+kind: ServiceTransport
+metadata:
+ name: ingress-transport
+ namespace: {{ .Release.Namespace }}
+spec:
+ port: {{ .Values.port }}
+ sourcePort: {{ .Values.ingress.sourcePort }}
+ protocol: TCP
+ service: soft-serve
+ ingressClassName: {{ .Values.ingress.ingressClassName }}
+{{ end }}
diff --git a/charts/soft-serve/templates/service.yaml b/charts/soft-serve/templates/service.yaml
new file mode 100644
index 0000000..9e54b46
--- /dev/null
+++ b/charts/soft-serve/templates/service.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: soft-serve
+ namespace: {{ .Release.Namespace }}
+ {{ if eq .Values.serviceType "LoadBalancer" }}
+ annotations:
+ {{ if .Values.reservedIP }}
+ metallb.universe.tf/loadBalancerIPs: {{ .Values.reservedIP }}
+ {{ end }}
+ {{ if .Values.addressPool }}
+ metallb.universe.tf/address-pool: {{ .Values.addressPool }}
+ {{ end }}
+ {{ end }}
+spec:
+ type: {{ .Values.serviceType }}
+ selector:
+ app: soft-serve
+ ports:
+ - name: ssh
+ port: {{ .Values.port }}
+ protocol: TCP
+ - name: http
+ port: 80 # TODO(gio): make configurable
+ targetPort: http
+ protocol: TCP
diff --git a/charts/soft-serve/templates/stateful-set.yaml b/charts/soft-serve/templates/stateful-set.yaml
new file mode 100644
index 0000000..e5b32eb
--- /dev/null
+++ b/charts/soft-serve/templates/stateful-set.yaml
@@ -0,0 +1,67 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: soft-serve
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: soft-serve
+ serviceName: soft-serve
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: soft-serve
+ spec:
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: soft-serve
+ {{ if and .Values.privateKey .Values.publicKey }}
+ - name: keys
+ configMap:
+ name: keys
+ {{ end }}
+ containers:
+ - name: soft-serve
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy}}
+ env:
+ - name: SOFT_SERVE_SSH_LISTEN_ADDR
+ value: ":{{ .Values.port }}"
+ - name: SOFT_SERVE_SSH_PUBLIC_URL
+ value: "ssh://{{ .Values.ingress.domain }}:{{ .Values.sshPublicPort }}"
+ - name: SOFT_SERVE_INITIAL_ADMIN_KEYS
+ value: |-
+{{ indent 12 .Values.adminKey }}
+ {{ if and .Values.privateKey .Values.publicKey }}
+ - name: SOFT_SERVE_SSH_KEY_PATH
+ value: /.ssh/key
+ {{ end }}
+ - name: SOFT_SERVE_DATA_PATH
+ value: /var/lib/soft-serve/repos
+ - name: SOFT_SERVE_HTTP_LISTEN_ADDR
+ value: ":80"
+ - name: SOFT_SERVE_HTTP_PUBLIC_URL
+ value: "http://{{ .Values.ingress.domain }}"
+ - name: SOFT_SERVE_ALLOW_KEYLESS
+ value: "{{ .Values.allowKeyless }}"
+ - name: SOFT_SERVE_ANON_ACCESS
+ value: "{{ .Values.anonAccess }}"
+ ports:
+ - name: ssh
+ containerPort: {{ .Values.port }}
+ protocol: TCP
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: data
+ mountPath: /var/lib/soft-serve
+ readOnly: false
+ {{ if and .Values.privateKey .Values.publicKey }}
+ - name: keys
+ mountPath: /.ssh
+ readOnly: true
+ {{ end }}
diff --git a/charts/soft-serve/templates/volume.yaml b/charts/soft-serve/templates/volume.yaml
new file mode 100644
index 0000000..f5d0bfc
--- /dev/null
+++ b/charts/soft-serve/templates/volume.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: soft-serve
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.size }}
diff --git a/charts/soft-serve/values.yaml b/charts/soft-serve/values.yaml
new file mode 100644
index 0000000..cd042fc
--- /dev/null
+++ b/charts/soft-serve/values.yaml
@@ -0,0 +1,22 @@
+image:
+ repository: charmcli/soft-serve
+ tag: latest
+ pullPolicy: IfNotPresent
+storage:
+ size: 1Gi
+port: 22
+sshPublicPort: 22
+privateKey: ""
+publicKey: ""
+adminKey: ""
+serviceType: LoadBalancer
+reservedIP: ""
+addressPool: ""
+ingress:
+ enabled: false
+ domain: git.p.example.com
+ ingressClassName: example-ingress-private
+ certificateIssuer: ""
+ sourcePort: 0
+allowKeyless: false
+anonAccess: "no-access"
diff --git a/charts/tailscale-proxy/.helmignore b/charts/tailscale-proxy/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/tailscale-proxy/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/tailscale-proxy/Chart.yaml b/charts/tailscale-proxy/Chart.yaml
new file mode 100644
index 0000000..a87d9d7
--- /dev/null
+++ b/charts/tailscale-proxy/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: tailscale-proxy
+description: A Helm chart to run tailscale node
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/tailscale-proxy/templates/install.yaml b/charts/tailscale-proxy/templates/install.yaml
new file mode 100644
index 0000000..76241da
--- /dev/null
+++ b/charts/tailscale-proxy/templates/install.yaml
@@ -0,0 +1,97 @@
+# apiVersion: v1
+# kind: PersistentVolumeClaim
+# metadata:
+# name: tailscale
+# namespace: {{ .Release.Namespace }}
+# annotations:
+# helm.sh/resource-policy: keep
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: 1Gi
+---
+apiVersion: headscale.dodo.cloud/v1
+kind: HeadscaleUser
+metadata:
+ name: {{ .Values.username }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ headscaleAddress: {{ .Values.apiServer }}
+ name: {{ .Values.username }}
+ preAuthKey:
+ enabled: true
+ secretName: {{ .Values.preAuthKeySecret }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: tailscale
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: tailscale
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: tailscale
+ spec:
+ # volumes:
+ # - name: tailscale
+ # persistentVolumeClaim:
+ # claimName: tailscale
+ containers:
+ - name: tailscale
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: true
+ capabilities:
+ add:
+ - NET_ADMIN
+ env:
+ - name: TS_KUBE_SECRET
+ value: {{ .Values.preAuthKeySecret }}
+ # - name: TS_STATE_DIR
+ # value: /tailscale-state
+ # - name: TS_AUTHKEY
+ # valueFrom:
+ # secretKeyRef:
+ # name: {{ .Values.preAuthKeySecret }}
+ # key: key
+ - name: TS_HOSTNAME
+ value: {{ .Values.hostname }}
+ - name: TS_ROUTES
+ value: {{ .Values.ipSubnet }}
+ - name: TS_EXTRA_ARGS
+ value: --login-server={{ .Values.loginServer }}
+ # volumeMounts:
+ # - name: tailscale
+ # mountPath: /tailscale-state
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: secrets
+ namespace: {{ .Release.Namespace }}
+rules:
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "watch", "list", "patch", "update"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: secrets
+ namespace: {{ .Release.Namespace }}
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: Role
+ name: secrets
+ apiGroup: rbac.authorization.k8s.io
diff --git a/charts/tailscale-proxy/values.yaml b/charts/tailscale-proxy/values.yaml
new file mode 100644
index 0000000..82a6928
--- /dev/null
+++ b/charts/tailscale-proxy/values.yaml
@@ -0,0 +1,10 @@
+image:
+ repository: tailscale/tailscale
+ tag: v1.42.0
+ pullPolicy: IfNotPresent
+username: example
+hostname: example-ingress
+apiServer: http://headscale-api.example-app-headscale.svc.cluster.local
+loginServer: https://headscale.example.com
+ipSubnet: 10.1.0.1/24
+preAuthKeySecret: example-key
diff --git a/charts/url-shortener/.helmignore b/charts/url-shortener/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/url-shortener/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/url-shortener/Chart.yaml b/charts/url-shortener/Chart.yaml
new file mode 100644
index 0000000..bb2151d
--- /dev/null
+++ b/charts/url-shortener/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: url-shortener
+description: A Helm chart for URL shortener application
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/url-shortener/templates/install.yaml b/charts/url-shortener/templates/install.yaml
new file mode 100644
index 0000000..a0fc858
--- /dev/null
+++ b/charts/url-shortener/templates/install.yaml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: url-shortener
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: url-shortener
+ ports:
+ - name: {{ .Values.portName }}
+ protocol: TCP
+ port: 80
+ targetPort: {{ .Values.portName }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: url-shortener
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: url-shortener
+ template:
+ metadata:
+ labels:
+ app: url-shortener
+ spec:
+ containers:
+ - name: url-shortener
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ ports:
+ - name: {{ .Values.portName }}
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - url-shortener
+ - --port=8080
+ - --db-path=/data/urls.db
+ - --require-auth={{ .Values.requireAuth }}
+ volumeMounts:
+ - name: url-shortener
+ mountPath: /data
+ volumes:
+ - name: url-shortener
+ persistentVolumeClaim:
+ claimName: url-shortener
diff --git a/charts/url-shortener/templates/volume.yaml b/charts/url-shortener/templates/volume.yaml
new file mode 100644
index 0000000..992e601
--- /dev/null
+++ b/charts/url-shortener/templates/volume.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: url-shortener
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.size }}
diff --git a/charts/url-shortener/values.yaml b/charts/url-shortener/values.yaml
new file mode 100644
index 0000000..dd3da7d
--- /dev/null
+++ b/charts/url-shortener/values.yaml
@@ -0,0 +1,8 @@
+image:
+ repository: giolekva/url-shortener
+ tag: latest
+ pullPolicy: Always
+storage:
+ size: 1Gi
+portName: http
+requireAuth: false
diff --git a/charts/vaultwarden/.helmignore b/charts/vaultwarden/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/vaultwarden/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/vaultwarden/Chart.yaml b/charts/vaultwarden/Chart.yaml
new file mode 100644
index 0000000..59cbfa8
--- /dev/null
+++ b/charts/vaultwarden/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: vaultwarden
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/charts/vaultwarden/templates/install.yaml b/charts/vaultwarden/templates/install.yaml
new file mode 100644
index 0000000..3305d6a
--- /dev/null
+++ b/charts/vaultwarden/templates/install.yaml
@@ -0,0 +1,86 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: data
+ namespace: {{ .Release.Namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.size }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: server
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: server
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: server
+ spec:
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: data
+ containers:
+ - name: server
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: ROCKET_PORT
+ value: "80"
+ - name: DISABLE_ADMIN_TOKEN
+ value: "true"
+ - name: DOMAIN
+ value: https://{{ .Values.domain }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: data
+ mountPath: /data
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: server
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: server
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+spec:
+ ingressClassName: {{ .Values.ingressClassName }}
+ tls:
+ - hosts:
+ - {{ .Values.domain }}
+ rules:
+ - host: {{ .Values.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: server
+ port:
+ name: http
diff --git a/charts/vaultwarden/values.yaml b/charts/vaultwarden/values.yaml
new file mode 100644
index 0000000..8bd6296
--- /dev/null
+++ b/charts/vaultwarden/values.yaml
@@ -0,0 +1,9 @@
+image:
+ repository: vaultwarden/server
+ tag: 1.28.1
+ pullPolicy: IfNotPresent
+storage:
+ size: 1Gi
+domain: bitwarden.example.com
+certificateIssuer: private
+ingressClassName: ingress-private
diff --git a/charts/volumes/.helmignore b/charts/volumes/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/volumes/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/volumes/Chart.yaml b/charts/volumes/Chart.yaml
new file mode 100644
index 0000000..70093c7
--- /dev/null
+++ b/charts/volumes/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: volumes
+description: A Helm chart for creating PCloud volumes
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/volumes/templates/pvc.yaml b/charts/volumes/templates/pvc.yaml
new file mode 100644
index 0000000..554ce62
--- /dev/null
+++ b/charts/volumes/templates/pvc.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ .Values.name }}
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/resource-policy: keep
+spec:
+ accessModes:
+ - {{ .Values.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.size }}
diff --git a/charts/volumes/values.yaml b/charts/volumes/values.yaml
new file mode 100644
index 0000000..859154b
--- /dev/null
+++ b/charts/volumes/values.yaml
@@ -0,0 +1,3 @@
+name: data
+accessMode: ReadWriteOnce
+size: 1Gi
diff --git a/charts/vpn-mesh-config/.helmignore b/charts/vpn-mesh-config/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/vpn-mesh-config/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/vpn-mesh-config/Chart.yaml b/charts/vpn-mesh-config/Chart.yaml
new file mode 100644
index 0000000..cb9112a
--- /dev/null
+++ b/charts/vpn-mesh-config/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: vpn-mesh-config
+description: A Helm chart for PCloud internal VPN mesh network configuration
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/vpn-mesh-config/templates/api.yaml b/charts/vpn-mesh-config/templates/api.yaml
new file mode 100644
index 0000000..a747f4f
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/api.yaml
@@ -0,0 +1,104 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: nebula-api
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: LoadBalancer
+ selector:
+ app: nebula-api
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nebula-api
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: nebula-api
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: nebula-api
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/lighthouse-config-template.yaml") . | sha256sum }}
+ spec:
+ volumes:
+ - name: config
+ configMap:
+ name: lighthouse-config-template
+ containers:
+ - name: api
+ image: {{ .Values.api.image.repository }}:{{ .Values.api.image.tag }}
+ imagePullPolicy: {{ .Values.api.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - nebula-api
+ - --port=8080
+ - --namespace={{ .Release.Namespace }}
+ - --ca-name={{ .Values.certificateAuthority.name }}
+ - --config-tmpl=/etc/nebula-api/config/lighthouse.yaml
+ volumeMounts:
+ - name: config
+ mountPath: /etc/nebula-api/config
+ readOnly: true
+ resources:
+ requests:
+ memory: "10Mi"
+ cpu: "10m"
+ limits:
+ memory: "20Mi"
+ cpu: "100m"
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+spec:
+ ingressClassName: {{ .Values.ingressClassName }}
+ tls:
+ - hosts:
+ - vpn.{{ .Values.domain }}
+ secretName: cert-vpn.{{ .Values.domain }}
+ rules:
+ - host: vpn.{{ .Values.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: nebula-api
+ port:
+ name: http
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: vpn.{{ .Values.domain }}
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ helm.sh/resource-policy: keep
+spec:
+ dnsNames:
+ - 'vpn.{{ .Values.domain }}'
+ issuerRef:
+ name: {{ .Values.certificateIssuer }}
+ kind: ClusterIssuer
+ secretName: cert-vpn.{{ .Values.domain }}
diff --git a/charts/vpn-mesh-config/templates/certificate-authority.yaml b/charts/vpn-mesh-config/templates/certificate-authority.yaml
new file mode 100644
index 0000000..90e3f9b
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/certificate-authority.yaml
@@ -0,0 +1,9 @@
+apiVersion: lekva.me/v1
+kind: NebulaCA
+metadata:
+ name: {{ .Values.certificateAuthority.name }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ .Values.certificateAuthority.secretName }}
+
+
diff --git a/charts/vpn-mesh-config/templates/lighthouse-config-template.yaml b/charts/vpn-mesh-config/templates/lighthouse-config-template.yaml
new file mode 100644
index 0000000..f76f526
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/lighthouse-config-template.yaml
@@ -0,0 +1,48 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: lighthouse-config-template
+ namespace: {{ .Release.Namespace }}
+data:
+ lighthouse.yaml: |
+ pki:
+ ca: ""
+ cert: ""
+ key: ""
+ static_host_map:
+ "{{ .Values.lighthouse.internalIP }}": ["{{ .Values.lighthouse.externalIP }}:{{ .Values.lighthouse.port }}"]
+ lighthouse:
+ am_lighthouse: false
+ interval: 60
+ hosts:
+ - "{{ .Values.lighthouse.internalIP }}"
+ listen:
+ host: "[::]"
+ port: 4242
+ punchy:
+ punch: true
+ cipher: chachapoly
+ tun:
+ disabled: false
+ dev: pcloud0
+ drop_local_broadcast: false
+ drop_multicast: false
+ tx_queue: 500
+ mtu: 576
+ logging:
+ level: debug
+ format: text
+ firewall:
+ conntrack:
+ tcp_timeout: 12m
+ udp_timeout: 3m
+ default_timeout: 10m
+ max_connections: 100000
+ outbound:
+ - port: any
+ proto: any
+ host: any
+ inbound:
+ - port: any
+ proto: any
+ host: any
diff --git a/charts/vpn-mesh-config/templates/lighthouse-config.yaml b/charts/vpn-mesh-config/templates/lighthouse-config.yaml
new file mode 100644
index 0000000..7ce6c0a
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/lighthouse-config.yaml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: lighthouse-config
+ namespace: {{ .Release.Namespace }}
+data:
+ lighthouse.yaml: |
+ pki:
+ ca: /etc/nebula/lighthouse/ca.crt
+ cert: /etc/nebula/lighthouse/host.crt
+ key: /etc/nebula/lighthouse/host.key
+ static_host_map:
+ "{{ .Values.lighthouse.internalIP }}": ["{{ .Values.lighthouse.externalIP }}:{{ .Values.lighthouse.port }}"]
+ lighthouse:
+ am_lighthouse: true
+ interval: 60
+ listen:
+ host: "[::]"
+ port: {{ .Values.lighthouse.port }}
+ punchy:
+ punch: true
+ cipher: chachapoly
+ tun:
+ disabled: false
+ dev: nebula1
+ drop_local_broadcast: false
+ drop_multicast: false
+ tx_queue: 500
+ mtu: 1300
+ logging:
+ level: info
+ format: text
+ firewall:
+ conntrack:
+ tcp_timeout: 12m
+ udp_timeout: 3m
+ default_timeout: 10m
+ max_connections: 100000
+ outbound:
+ - port: any
+ proto: any
+ host: any
+ inbound:
+ - port: any
+ proto: any
+ host: any
diff --git a/charts/vpn-mesh-config/templates/lighthouse-service.yaml b/charts/vpn-mesh-config/templates/lighthouse-service.yaml
new file mode 100644
index 0000000..b23d99d
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/lighthouse-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: lighthouse
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: LoadBalancer
+ externalTrafficPolicy: Local
+ selector:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-private
+ app.kubernetes.io/component: controller
+ ports:
+ - name: lighthouse
+ port: {{ .Values.lighthouse.port }}
+ targetPort: nebula
+ protocol: UDP
diff --git a/charts/vpn-mesh-config/templates/nebula-node.yaml b/charts/vpn-mesh-config/templates/nebula-node.yaml
new file mode 100644
index 0000000..d4bc6d2
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/nebula-node.yaml
@@ -0,0 +1,10 @@
+apiVersion: lekva.me/v1
+kind: NebulaNode
+metadata:
+ name: {{ .Values.lighthouse.name }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ caName: {{ .Values.certificateAuthority.name }}
+ caNamespace: {{ .Release.Namespace }}
+ ipCidr: {{ .Values.lighthouse.internalIP }}/24
+ secretName: {{ .Values.lighthouse.secretName }}
diff --git a/charts/vpn-mesh-config/templates/role.yaml b/charts/vpn-mesh-config/templates/role.yaml
new file mode 100644
index 0000000..c48ab48
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/role.yaml
@@ -0,0 +1,42 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Release.Namespace }}-nebula-api
+ namespace: {{ .Release.Namespace }}
+rules:
+- apiGroups:
+ - "lekva.me"
+ resources:
+ - nebulacas
+ - nebulacas/status
+ - nebulanodes
+ - nebulanodes/status
+ verbs:
+ - list
+ - get
+ - create
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - list
+ - get
+ - create
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Release.Namespace }}-nebula-api
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Release.Namespace }}-nebula-api
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
diff --git a/charts/vpn-mesh-config/values.yaml b/charts/vpn-mesh-config/values.yaml
new file mode 100644
index 0000000..0174b32
--- /dev/null
+++ b/charts/vpn-mesh-config/values.yaml
@@ -0,0 +1,20 @@
+domain: example.com
+ingressClassName: pcloud-ingress-public
+certificateIssuer: example-public
+
+certificateAuthority:
+ name: "nebula"
+ secretName: "ca-nebula-cert"
+
+lighthouse:
+ name: "lighthouse"
+ secretName: "node-lighthouse-cert"
+ internalIP: "0.0.0.0"
+ externalIP: "0.0.0.0"
+ port: "4242"
+
+api:
+ image:
+ repository: giolekva/nebula-api
+ tag: latest
+ pullPolicy: Always
diff --git a/charts/welcome/.helmignore b/charts/welcome/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/welcome/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/welcome/Chart.yaml b/charts/welcome/Chart.yaml
new file mode 100644
index 0000000..ad3ce6d
--- /dev/null
+++ b/charts/welcome/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: welcome
+description: A Helm chart for PCloud welcome
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/welcome/templates/install.yaml b/charts/welcome/templates/install.yaml
new file mode 100644
index 0000000..81898d4
--- /dev/null
+++ b/charts/welcome/templates/install.yaml
@@ -0,0 +1,118 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Values.clusterRoleName }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Values.clusterRoleName }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Values.clusterRoleName }}
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ssh-key
+type: Opaque
+data:
+ private: {{ .Values.sshPrivateKey }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: welcome
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: ClusterIP
+ selector:
+ app: welcome
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.ingress.certificateIssuer }}
+ annotations:
+ acme.cert-manager.io/http01-edit-in-place: "true"
+ cert-manager.io/cluster-issuer: {{ .Values.ingress.certificateIssuer }}
+ {{- end }}
+spec:
+ ingressClassName: {{ .Values.ingress.className }}
+ {{- if .Values.ingress.certificateIssuer }}
+ tls:
+ - hosts:
+ - {{ .Values.ingress.domain }}
+ secretName: cert-welcome
+ {{- end }}
+ rules:
+ - host: {{ .Values.ingress.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: welcome
+ port:
+ name: http
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: welcome
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ app: welcome
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: welcome
+ spec:
+ volumes:
+ - name: ssh-key
+ secret:
+ secretName: ssh-key
+ containers:
+ - name: welcome
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ command:
+ - pcloud-installer
+ - welcome
+ - --repo-addr={{ .Values.repoAddr }}
+ - --ssh-key=/pcloud/ssh-key/private
+ - --port=8080
+ - --create-account-addr={{ .Values.createAccountAddr }}
+ - --login-addr={{ .Values.loginAddr }}
+ - --memberships-addr={{ .Values.membershipsAddr }}
+ volumeMounts:
+ - name: ssh-key
+ readOnly: true
+ mountPath: /pcloud/ssh-key
diff --git a/charts/welcome/values.yaml b/charts/welcome/values.yaml
new file mode 100644
index 0000000..a0b42b1
--- /dev/null
+++ b/charts/welcome/values.yaml
@@ -0,0 +1,14 @@
+image:
+ repository: giolekva/pcloud-installer
+ tag: latest
+ pullPolicy: Always
+repoAddr: 192.168.0.11
+sshPrivateKey: key
+createAccountAddr: http://api.core-auth.svc.cluster.local/identities
+loginAddr: https://accounts-ui.example.com
+membershipsAddr: http://memberships.example.svc.cluster.local
+ingress:
+ className: pcloud-ingress-public
+ domain: welcome.example.com
+ certificateIssuer: example-public
+clusterRoleName: example-welcome
diff --git a/charts/zot/.helmignore b/charts/zot/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/zot/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/zot/Chart.yaml b/charts/zot/Chart.yaml
new file mode 100644
index 0000000..607d093
--- /dev/null
+++ b/charts/zot/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: v2.0.3
+description: A Helm chart for Kubernetes
+name: zot
+type: application
+version: 0.1.53
diff --git a/charts/zot/templates/NOTES.txt b/charts/zot/templates/NOTES.txt
new file mode 100644
index 0000000..45572c7
--- /dev/null
+++ b/charts/zot/templates/NOTES.txt
@@ -0,0 +1,6 @@
+Get the application URL by running these commands:
+{{- if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "zot.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- end }}
\ No newline at end of file
diff --git a/charts/zot/templates/_helpers.tpl b/charts/zot/templates/_helpers.tpl
new file mode 100644
index 0000000..67db10e
--- /dev/null
+++ b/charts/zot/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "zot.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "zot.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "zot.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "zot.labels" -}}
+helm.sh/chart: {{ include "zot.chart" . }}
+{{ include "zot.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "zot.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "zot.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "zot.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "zot.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/charts/zot/templates/configmap.yaml b/charts/zot/templates/configmap.yaml
new file mode 100644
index 0000000..a6a29c3
--- /dev/null
+++ b/charts/zot/templates/configmap.yaml
@@ -0,0 +1,10 @@
+{{- if and .Values.mountConfig .Values.configFiles }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-config
+data:
+{{- range $key, $val := .Values.configFiles }}
+ {{ $key }}: {{ $val | quote }}
+{{- end }}
+{{- end }}
diff --git a/charts/zot/templates/deployment.yaml b/charts/zot/templates/deployment.yaml
new file mode 100644
index 0000000..68b0342
--- /dev/null
+++ b/charts/zot/templates/deployment.yaml
@@ -0,0 +1,146 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "zot.fullname" . }}
+ labels:
+ {{- include "zot.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ {{- with .Values.strategy }}
+ strategy:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "zot.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ annotations:
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if and .Values.mountConfig .Values.configFiles }}
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.forceRoll }}
+ rollme: {{ randAlphaNum 5 | quote }}
+ {{- end }}
+ labels:
+ {{- include "zot.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "zot.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ {{- toYaml .Values.env | nindent 12 }}
+ ports:
+ - name: zot
+ containerPort: 5000
+ protocol: TCP
+ {{- if or (not (empty .Values.extraVolumeMounts)) .Values.mountConfig .Values.mountSecret .Values.persistence .Values.externalSecrets }}
+ volumeMounts:
+ {{- if .Values.mountConfig }}
+ - mountPath: '/etc/zot'
+ name: {{ .Release.Name }}-config
+ {{- end }}
+ {{- if .Values.mountSecret }}
+ - mountPath: '/secret'
+ name: {{ .Release.Name }}-secret
+ {{- end }}
+ {{- range .Values.externalSecrets }}
+ - mountPath: {{ .mountPath | quote }}
+ name: {{ .secretName | quote }}
+ {{- end }}
+ {{- if .Values.persistence }}
+ - mountPath: '/var/lib/registry'
+ name: {{ .Release.Name }}-volume
+ {{- end }}
+ {{- with .Values.extraVolumeMounts }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ # livenessProbe:
+ # initialDelaySeconds: 5
+ # httpGet:
+ # path: /v2/
+ # port: 5000
+ # scheme: {{ .Values.httpGet.scheme }}
+ # {{- if .Values.authHeader }}
+ # httpHeaders:
+ # - name: Authorization
+ # value: Basic {{ .Values.authHeader }}
+ # {{- end }}
+ # readinessProbe:
+ # initialDelaySeconds: 5
+ # httpGet:
+ # path: /v2/
+ # port: 5000
+ # scheme: {{ .Values.httpGet.scheme }}
+ # {{- if .Values.authHeader }}
+ # httpHeaders:
+ # - name: Authorization
+ # value: Basic {{ .Values.authHeader }}
+ # {{- end }}
+ # startupProbe:
+ # initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }}
+ # periodSeconds: {{ .Values.startupProbe.periodSeconds }}
+ # failureThreshold: {{ .Values.startupProbe.failureThreshold }}
+ # httpGet:
+ # path: /v2/
+ # port: 5000
+ # scheme: {{ .Values.httpGet.scheme }}
+ # {{- if .Values.authHeader }}
+ # httpHeaders:
+ # - name: Authorization
+ # value: Basic {{ .Values.authHeader }}
+ # {{- end }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- if or .Values.mountConfig .Values.mountSecret .Values.persistence .Values.externalSecrets (not (empty .Values.extraVolumes))}}
+ volumes:
+ {{- if .Values.mountConfig }}
+ - name: {{ .Release.Name }}-config
+ configMap:
+ name: {{ .Release.Name }}-config
+ {{- end }}
+ {{- if .Values.mountSecret }}
+ - name: {{ .Release.Name }}-secret
+ secret:
+ secretName: {{ .Release.Name }}-secret
+ {{- end }}
+ {{- range .Values.externalSecrets }}
+ - name: {{ .secretName }}
+ secret:
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- if .Values.persistence }}
+ - name: {{ .Release.Name }}-volume
+ persistentVolumeClaim:
+ claimName: {{ .Values.pvc.name | default (printf "%s-pvc" .Release.Name) }}
+ {{- end }}
+ {{- with .Values.extraVolumes }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/charts/zot/templates/ingress.yaml b/charts/zot/templates/ingress.yaml
new file mode 100644
index 0000000..02d9dbf
--- /dev/null
+++ b/charts/zot/templates/ingress.yaml
@@ -0,0 +1,63 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "zot.fullname" . -}}
+{{- $httpPort := .Values.service.port -}}
+{{- $pathtype := .Values.ingress.pathtype -}}
+{{- $ingressPath := .Values.ingress.path -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ app: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+{{- with .Values.ingress.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ {{- if .Values.ingress.className }}
+ ingressClassName: {{ .Values.ingress.className | quote }}
+ {{- end }}
+{{- if .Values.ingress.tls }}
+ tls:
+ {{- if $ingressPath }}
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- else }}
+{{ toYaml .Values.ingress.tls | indent 4 }}
+ {{- end }}
+{{- end}}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ {{- if $ingressPath }}
+ - host: {{ . }}
+ http:
+ paths:
+ - path: {{ $ingressPath }}
+ pathType: {{ $pathtype }}
+ backend:
+ service:
+ name: {{ $fullName }}
+ port:
+ number: {{ $httpPort }}
+ {{- else }}
+ - host: {{ .host }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ pathType: {{ $pathtype }}
+ backend:
+ service:
+ name: {{ $fullName }}
+ port:
+ number: {{ .servicePort | default $httpPort }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/zot/templates/pvc.yaml b/charts/zot/templates/pvc.yaml
new file mode 100644
index 0000000..d30c59a
--- /dev/null
+++ b/charts/zot/templates/pvc.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.persistence .Values.pvc .Values.pvc.create }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ .Values.pvc.name | default (printf "%s-pvc" .Release.Name) }}
+spec:
+ accessModes:
+ - {{ .Values.pvc.accessMode | default "ReadWriteOnce" }}
+ resources:
+ requests:
+ storage: {{ .Values.pvc.storage | default "8Gi" }}
+ {{- if .Values.pvc.storageClassName }}
+ storageClassName: {{ .Values.pvc.storageClassName }}
+ {{- end }}
+{{- end }}
diff --git a/charts/zot/templates/secret.yaml b/charts/zot/templates/secret.yaml
new file mode 100644
index 0000000..4d5f8d1
--- /dev/null
+++ b/charts/zot/templates/secret.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.mountSecret .Values.secretFiles }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-secret
+type: Opaque
+data:
+{{- range $key, $val := .Values.secretFiles }}
+ {{ $key }}: {{ $val | b64enc }}
+{{- end }}
+{{- end }}
diff --git a/charts/zot/templates/service.yaml b/charts/zot/templates/service.yaml
new file mode 100644
index 0000000..14b997b
--- /dev/null
+++ b/charts/zot/templates/service.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "zot.fullname" . }}
+ labels:
+ {{- include "zot.labels" . | nindent 4 }}
+{{- with .Values.service.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ {{- if and .Values.service.clusterIP ( eq .Values.service.type "ClusterIP" ) }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: zot
+ protocol: TCP
+ name: zot
+ {{- if .Values.service.nodePort }}
+ nodePort: {{ .Values.service.nodePort }}
+ {{- end }}
+ selector:
+ {{- include "zot.selectorLabels" . | nindent 4 }}
diff --git a/charts/zot/templates/serviceaccount.yaml b/charts/zot/templates/serviceaccount.yaml
new file mode 100644
index 0000000..23f5efa
--- /dev/null
+++ b/charts/zot/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "zot.serviceAccountName" . }}
+ labels:
+ {{- include "zot.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/zot/templates/tests/test-connection-fails.yaml b/charts/zot/templates/tests/test-connection-fails.yaml
new file mode 100644
index 0000000..0e7a059
--- /dev/null
+++ b/charts/zot/templates/tests/test-connection-fails.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "zot.fullname" . }}-test-connection-fails"
+ labels:
+ {{- include "zot.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed
+spec:
+ containers:
+ - name: wget
+ image: alpine:3.18
+ command:
+ - sh
+ - -c
+ - |
+ scheme="http"
+ {{- if eq $.Values.httpGet.scheme "HTTPS"}}
+ scheme="https"
+ {{- end }}
+ echo "$scheme"
+ {{- if .Values.authHeader }}
+ wget --no-check-certificate -o output $scheme://{{ include "zot.fullname" . }}:{{ .Values.service.port }}/v2/_catalog || (grep Unauthorized output)
+ {{- else }}
+ wget --no-check-certificate $scheme://{{ include "zot.fullname" . }}:{{ .Values.service.port }}/v2/_catalog
+ {{- end }}
+ restartPolicy: Never
diff --git a/charts/zot/templates/tests/test-connection.yaml b/charts/zot/templates/tests/test-connection.yaml
new file mode 100644
index 0000000..59c64b4
--- /dev/null
+++ b/charts/zot/templates/tests/test-connection.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "zot.fullname" . }}-test-connection"
+ labels:
+ {{- include "zot.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed
+spec:
+ containers:
+ - name: wget
+ image: alpine:3.18
+ command:
+ - sh
+ - -c
+ - |
+ scheme="http"
+ {{- if eq $.Values.httpGet.scheme "HTTPS"}}
+ scheme="https"
+ {{- end }}
+ echo "$scheme"
+ {{- if .Values.authHeader }}
+ wget --no-check-certificate --header "Authorization: Basic {{ .Values.authHeader }}" $scheme://{{ include "zot.fullname" . }}:{{ .Values.service.port }}/v2/_catalog
+ {{- else }}
+ wget --no-check-certificate $scheme://{{ include "zot.fullname" . }}:{{ .Values.service.port }}/v2/_catalog
+ {{- end }}
+ restartPolicy: Never
diff --git a/charts/zot/unittests/__snapshot__/ingress_test.yaml.snap b/charts/zot/unittests/__snapshot__/ingress_test.yaml.snap
new file mode 100644
index 0000000..115a740
--- /dev/null
+++ b/charts/zot/unittests/__snapshot__/ingress_test.yaml.snap
@@ -0,0 +1,29 @@
+should match snapshot of default values:
+ 1: |
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ kubernetes.io/tls-acme: "true"
+ labels:
+ app: zot
+ release: RELEASE-NAME
+ name: RELEASE-NAME-zot
+ spec:
+ ingressClassName: nginx-test
+ rules:
+ - host: chart-example.local
+ http:
+ paths:
+ - backend:
+ service:
+ name: RELEASE-NAME-zot
+ port:
+ number: 5000
+ path: /
+ pathType: ImplementationSpecific
+ tls:
+ - hosts:
+ - chart-example.local
+ secretName: chart-example-tls
diff --git a/charts/zot/unittests/configmap_checksum_test.yaml b/charts/zot/unittests/configmap_checksum_test.yaml
new file mode 100644
index 0000000..9d366e4
--- /dev/null
+++ b/charts/zot/unittests/configmap_checksum_test.yaml
@@ -0,0 +1,23 @@
+suite: configmap checksum in deployment
+# Can't use global templates in this test suite as it will break the checksum calculation
+# causing a false-negative test outcome.
+# templates:
+# - deployment.yaml
+tests:
+ - it: has no checksum/config if no config
+ template: deployment.yaml
+ asserts:
+ - isNull:
+ path: spec.template.metadata.annotations.checksum/config
+ - it: generate checksum/config if config is present
+ template: deployment.yaml
+ set:
+ mountConfig: true
+ configFiles:
+ config.json: "{}"
+ asserts:
+ - isNotNull:
+ path: spec.template.metadata.annotations.checksum/config
+ - matchRegex:
+ path: spec.template.metadata.annotations.checksum/config
+ pattern: "^[a-f0-9]{64}$" # SHA256 hex output
diff --git a/charts/zot/unittests/ingress_test.yaml b/charts/zot/unittests/ingress_test.yaml
new file mode 100644
index 0000000..0e71833
--- /dev/null
+++ b/charts/zot/unittests/ingress_test.yaml
@@ -0,0 +1,47 @@
+suite: test ingress
+templates:
+ - ingress.yaml
+tests:
+ - it: should be empty if ingress is not enabled
+ asserts:
+ - hasDocuments:
+ count: 0
+ - it: should have apiVersion networking.k8s.io/v1 for k8s >= 1.19
+ set:
+ ingress.enabled: true
+ capabilities:
+ majorVersion: 1
+ minorVersion: 19
+ asserts:
+ - hasDocuments:
+ count: 1
+ - isKind:
+ of: Ingress
+ - isAPIVersion:
+ of: networking.k8s.io/v1
+ - it: should have an ingressClassName for k8s >= 1.19
+ set:
+ ingress.enabled: true
+ ingress.className: nginx-test
+ capabilities:
+ majorVersion: 1
+ minorVersion: 19
+ asserts:
+ - hasDocuments:
+ count: 1
+ - equal:
+ path: spec.ingressClassName
+ value: nginx-test
+ - it: should match snapshot of default values
+ set:
+ ingress.enabled: true
+ ingress.className: nginx-test
+ ingress.annotations:
+ kubernetes.io/ingress.class: nginx
+ kubernetes.io/tls-acme: "true"
+ ingress.tls:
+ - secretName: chart-example-tls
+ hosts:
+ - chart-example.local
+ asserts:
+ - matchSnapshot: { }
diff --git a/charts/zot/values.yaml b/charts/zot/values.yaml
new file mode 100644
index 0000000..926940a
--- /dev/null
+++ b/charts/zot/values.yaml
@@ -0,0 +1,181 @@
+# Default values for zot.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: ghcr.io/project-zot/zot-linux-amd64
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v2.0.3"
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+service:
+ type: NodePort
+ port: 5000
+ nodePort: null # Set to a specific port if type is NodePort
+ # Annotations to add to the service
+ annotations: {}
+ # Set to a static IP if a static IP is desired, only works when
+ # type: ClusterIP
+ clusterIP: null
+# Enabling this will publicly expose your zot server
+# Only enable this if you have security enabled on your cluster
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ # If using nginx, disable body limits and increase read and write timeouts
+ # nginx.ingress.kubernetes.io/proxy-body-size: "0"
+ # nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
+ # nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
+ className: "nginx"
+ pathtype: ImplementationSpecific
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+# By default, Kubernetes HTTP probes use HTTP 'scheme'. So if TLS is enabled
+# in configuration, to prevent failures, the scheme must be set to 'HTTPS'.
+httpGet:
+ scheme: HTTP
+# By default, Kubernetes considers a Pod healthy if the liveness probe returns
+# successfully. However, sometimes applications need additional startup time on
+# their first initialization. By defining a startupProbe, we can allow the
+# application to take extra time for initialization without compromising fast
+# response to deadlocks.
+startupProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ failureThreshold: 3
+# If mountConfig is true the configMap named $CHART_RELEASE-config is mounted
+# on the pod's '/etc/zot' directory
+mountConfig: false
+# If mountConfig is true the chart creates the '$CHART_RELEASE-config', if it
+# does not exist the user is in charge of managing it (as this file includes a
+# sample file you have to add it empty to handle it externally) ... note that
+# the service does not reload the configFiles once mounted, so you need to
+# delete the pods to create new ones to use the new values.
+configFiles:
+ config.json: |-
+ {
+ "storage": { "rootDirectory": "/var/lib/registry" },
+ "http": { "address": "0.0.0.0", "port": "5000" },
+ "log": { "level": "debug" }
+ }
+# Alternatively, the configuration can include authentication and accessControl
+# data and we can use mountSecret option for the passwords.
+#
+# config.json: |-
+# {
+# "storage": { "rootDirectory": "/var/lib/registry" },
+# "http": {
+# "address": "0.0.0.0",
+# "port": "5000",
+# "auth": { "htpasswd": { "path": "/secret/htpasswd" } },
+# "accessControl": {
+# "repositories": {
+# "**": {
+# "policies": [{
+# "users": ["user"],
+# "actions": ["read"]
+# }],
+# "defaultPolicy": []
+# }
+# },
+# "adminPolicy": {
+# "users": ["admin"],
+# "actions": ["read", "create", "update", "delete"]
+# }
+# }
+# },
+# "log": { "level": "debug" }
+# }
+
+# externalSecrets allows to mount external (meaning not managed by this chart)
+# Kubernetes secrets within the Zot container.
+# The secret is identified by its name (property "secretName") and should be
+# present in the same namespace. The property "mountPath" specifies the path
+# within the container filesystem where the secret is mounted.
+#
+# Below is an example:
+#
+# externalSecrets:
+# - secretName: "secret1"
+# mountPath: "/secrets/s1"
+# - secretName: "secret2"
+# mountPath: "/secrets/s2"
+externalSecrets: []
+# If mountSecret is true, the Secret named $CHART_RELEASE-secret is mounted on
+# the pod's '/secret' directory (it is used to keep files with passwords, like
+# a `htpasswd` file)
+mountSecret: false
+# If secretFiles is not set, the user is in charge of managing the Secret; to
+# manage it externally, set this value to an empty map so none is created here
+secretFiles:
+ # Example htpasswd with 'admin:admin' & 'user:user' user:pass pairs
+ htpasswd: |-
+ admin:$2y$05$vmiurPmJvHylk78HHFWuruFFVePlit9rZWGA/FbZfTEmNRneGJtha
+ user:$2y$05$L86zqQDfH5y445dcMlwu6uHv.oXFgT6AiJCwpv3ehr7idc0rI3S2G
+# Authentication string for Kubernetes probes, which is needed when `htpasswd`
+# authentication is enabled, but the anonymous access policy is not.
+# It contains a `user:password` string encoded in base64. The example value is
+# from running `echo -n "foo:bar" | base64`
+# authHeader: "Zm9vOmJhcg=="
+
+# If persistence is 'true' the service uses a persistentVolumeClaim to mount a
+# volume for zot on '/var/lib/registry'; by default the pvc used is named
+# '$CHART_RELEASE-pvc', but the name can be changed below
+persistence: false
+# PVC data, only used if persistence is 'true'
+pvc:
+ # Make the chart create the PVC, this option is used with storageClasses that
+ # can create volumes dynamically, if that is not the case it is better to do it
+ # manually and set create to false
+ create: false
+ # Name of the PVC to use or create if persistence is enabled, if not set the
+ # value '$CHART_RELEASE-pvc' is used
+ name: null
+ # Volume access mode; if using more than one replica, a shared mode such as ReadWriteMany is needed
+ accessMode: "ReadWriteOnce"
+ # Size of the volume requested
+ storage: 8Gi
+ # Name of the storage class to use if it is different than the default one
+ storageClassName: null
+# List of environment variables to set on the container
+env:
+# - name: "TEST"
+# value: "ME"
+# - name: SECRET_NAME
+# valueFrom:
+# secretKeyRef:
+# name: mysecret
+# key: username
+
+# Extra Volume Mounts
+extraVolumeMounts: []
+# - name: data
+# mountPath: /var/lib/registry
+
+# Extra Volumes
+extraVolumes: []
+# - name: data
+# emptyDir: {}
+
+# Deployment strategy type
+strategy:
+ type: RollingUpdate
+# rollingUpdate:
+# maxUnavailable: 25%
+
+podAnnotations: {}