# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  # -- Toleration for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
  tolerations: []
  # -- Node selector for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
  nodeSelector: {}
  cattle:
    # -- Default system registry.
    systemDefaultRegistry: ""
    windowsCluster:
      # -- Setting that allows Longhorn to run on a Rancher Windows cluster.
      enabled: false
      # -- Toleration for Linux nodes that can run user-deployed Longhorn components.
      tolerations:
        - key: "cattle.io/os"
          value: "linux"
          effect: "NoSchedule"
          operator: "Equal"
      # -- Node selector for Linux nodes that can run user-deployed Longhorn components.
      nodeSelector:
        kubernetes.io/os: "linux"
      defaultSetting:
        # -- Toleration for system-managed Longhorn components.
        taintToleration: cattle.io/os=linux:NoSchedule
        # -- Node selector for system-managed Longhorn components.
        systemManagedComponentsNodeSelector: kubernetes.io/os:linux

networkPolicies:
  # -- Setting that allows you to enable network policies that control access to Longhorn pods.
  enabled: false
  # -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1")
  type: "k3s"

image:
  longhorn:
    engine:
      # -- Repository for the Longhorn Engine image.
      repository: longhornio/longhorn-engine
      # -- Specify Longhorn engine image tag
      tag: v1.6.4
    manager:
      # -- Repository for the Longhorn Manager image.
      repository: longhornio/longhorn-manager
      # -- Specify Longhorn manager image tag
      tag: v1.6.4
    ui:
      # -- Repository for the Longhorn UI image.
      repository: longhornio/longhorn-ui
      # -- Specify Longhorn ui image tag
      tag: v1.6.4
    instanceManager:
      # -- Repository for the Longhorn Instance Manager image.
      repository: longhornio/longhorn-instance-manager
      # -- Specify Longhorn instance manager image tag
      tag: v1.6.4
    shareManager:
      # -- Repository for the Longhorn Share Manager image.
      repository: longhornio/longhorn-share-manager
      # -- Specify Longhorn share manager image tag
      tag: v1.6.4
    backingImageManager:
      # -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
      repository: longhornio/backing-image-manager
      # -- Specify Longhorn backing image manager image tag
      tag: v1.6.4
    supportBundleKit:
      # -- Repository for the Longhorn Support Bundle Manager image.
      repository: longhornio/support-bundle-kit
      # -- Tag for the Longhorn Support Bundle Manager image.
      tag: v0.0.48
  csi:
    attacher:
      # -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-attacher
      # -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
      tag: v4.7.0-20241219
    provisioner:
      # -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-provisioner
      # -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
      tag: v3.6.4-20241219
    nodeDriverRegistrar:
      # -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-node-driver-registrar
      # -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
      tag: v2.12.0-20241219
    resizer:
      # -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-resizer
      # -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
      tag: v1.12.0-20241219
    snapshotter:
      # -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-snapshotter
      # -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
      tag: v6.3.4-20241219
    livenessProbe:
      # -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
      repository: longhornio/livenessprobe
      # -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
      tag: v2.14.0-20241219
  openshift:
    oauthProxy:
      # -- Repository for the OAuth Proxy image. Specify the upstream image (for example, "quay.io/openshift/origin-oauth-proxy"). This setting applies only to OpenShift users.
      repository: ""
      # -- Tag for the OAuth Proxy image. Specify OCP/OKD version 4.1 or later (including version 4.15, which is available at quay.io/openshift/origin-oauth-proxy:4.15). This setting applies only to OpenShift users.
      tag: ""
  # -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
  pullPolicy: IfNotPresent

service:
  ui:
    # -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
    type: ClusterIP
    # -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
    nodePort: null
  manager:
    # -- Service type for Longhorn Manager.
    type: ClusterIP
    # -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
    nodePort: ""

persistence:
  # -- Setting that allows you to specify the default Longhorn StorageClass.
  defaultClass: true
  # -- Filesystem type of the default Longhorn StorageClass.
  defaultFsType: ext4
  # -- mkfs parameters of the default Longhorn StorageClass.
  defaultMkfsParams: ""
  # -- Replica count of the default Longhorn StorageClass.
  defaultClassReplicaCount: 3
  # -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
  defaultDataLocality: disabled
  # -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
  reclaimPolicy: Delete
  # -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
  migratable: false
  # -- Set NFS mount options for Longhorn StorageClass for RWX volumes
  nfsOptions: ""
  recurringJobSelector:
    # -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass.
    enable: false
    # -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)
    jobList: []
  backingImage:
    # -- Setting that allows you to use a backing image in a Longhorn StorageClass.
    enable: false
    # -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.
    name: ~
    # -- Data source type of a backing image used in a Longhorn StorageClass.
    # If the backing image exists in the cluster, Longhorn uses this setting to verify the image.
    # If the backing image does not exist, Longhorn creates one using the specified data source type.
    dataSourceType: ~
    # -- Data source parameters of a backing image used in a Longhorn StorageClass.
    # You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
    dataSourceParameters: ~
    # -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
    expectedChecksum: ~
  defaultNodeSelector:
    # -- Setting that allows you to enable the node selector for the default Longhorn StorageClass.
    enable: false
    # -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")
    selector: ""
  # -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled")
  removeSnapshotsDuringFilesystemTrim: ignored

preUpgradeChecker:
  # -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions.
  jobEnabled: true
  # -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled.
  upgradeVersionCheck: true

csi:
  # -- kubelet root directory. When unspecified, Longhorn uses the default value.
  kubeletRootDir: ~
  # -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3").
  attacherReplicaCount: ~
  # -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3").
  provisionerReplicaCount: ~
  # -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3").
  resizerReplicaCount: ~
  # -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3").
  snapshotterReplicaCount: ~

defaultSettings:
  # -- Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE")
  backupTarget: ~
  # -- Name of the Kubernetes secret associated with the backup target.
  backupTargetCredentialSecret: ~
  # -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.
  allowRecurringJobWhileVolumeDetached: ~
  # -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.
  createDefaultDiskLabeledNodes: ~
  # -- Default path for storing data on a host. The default value is "/var/lib/longhorn/".
  defaultDataPath: ~
  # -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.
  defaultDataLocality: ~
  # -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default.
  replicaSoftAntiAffinity: ~
  # -- Setting that automatically rebalances replicas when an available node is discovered.
  replicaAutoBalance: ~
  # -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100".
  storageOverProvisioningPercentage: ~
  # -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25".
  storageMinimalAvailablePercentage: ~
  # -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node.
  storageReservedPercentageForDefaultDisk: ~
  # -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default
  upgradeChecker: ~
  # -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3".
  defaultReplicaCount: ~
  # -- Default Longhorn StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static".
  defaultLonghornStaticStorageClass: ~
  # -- Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled.
  backupstorePollInterval: ~
  # -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled.
  failedBackupTTL: ~
  # -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration.
  restoreVolumeRecurringJobs: ~
  # -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained.
  recurringSuccessfulJobsHistoryLimit: ~
  # -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.
  recurringFailedJobsHistoryLimit: ~
  # -- Maximum number of snapshots or backups to be retained.
  recurringJobMaxRetention: ~
  # -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles.
  supportBundleFailedHistoryLimit: ~
  # -- Taint or toleration for system-managed Longhorn components.
  # Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect).
  taintToleration: ~
  # -- Node selector for system-managed Longhorn components.
  systemManagedComponentsNodeSelector: ~
  # -- PriorityClass for system-managed Longhorn components.
  # This setting can help prevent Longhorn components from being evicted under Node Pressure.
  # Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`.
  priorityClass: &defaultPriorityClassNameRef "longhorn-critical"
  # -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default.
  autoSalvage: ~
  # -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.
  autoDeletePodWhenVolumeDetachedUnexpectedly: ~
  # -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default.
  disableSchedulingOnCordonedNode: ~
  # -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object.
  replicaZoneSoftAntiAffinity: ~
  # -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default.
  replicaDiskSoftAntiAffinity: ~
  # -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed.
  nodeDownPodDeletionPolicy: ~
  # -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained.
  nodeDrainPolicy: ~
  # -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned.
  detachManuallyAttachedVolumesWhenCordoned: ~
  # -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume.
  replicaReplenishmentWaitInterval: ~
  # -- Maximum number of replicas that can be concurrently rebuilt on each node.
  concurrentReplicaRebuildPerNodeLimit: ~
  # -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled.
  concurrentVolumeBackupRestorePerNodeLimit: ~
  # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI.
  disableRevisionCounter: ~
  # -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart.
  systemManagedPodsImagePullPolicy: ~
  # -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation.
  allowVolumeCreationWithDegradedAvailability: ~
  # -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed.
  autoCleanupSystemGeneratedSnapshot: ~
  # -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job.
  autoCleanupRecurringJobBackupSnapshot: ~
  # -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version.
  concurrentAutomaticEngineUpgradePerNodeLimit: ~
  # -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it.
  backingImageCleanupWaitInterval: ~
  # -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown".
  backingImageRecoveryWaitInterval: ~
  # -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12".
  guaranteedInstanceManagerCPU: ~
  # -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
  kubernetesClusterAutoscalerEnabled: ~
  # -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up.
  orphanAutoDeletion: ~
  # -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network.
  storageNetwork: ~
  # -- Flag that prevents accidental uninstallation of Longhorn.
  deletingConfirmationFlag: ~
  # -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8".
  engineReplicaTimeout: ~
  # -- Setting that allows you to enable and disable snapshot hashing and data integrity checks.
  snapshotDataIntegrity: ~
  # -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance.
  snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
  # -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format.
  snapshotDataIntegrityCronjob: ~
  # -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files.
  removeSnapshotsDuringFilesystemTrim: ~
  # -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check".
  fastReplicaRebuildEnabled: ~
  # -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed.
  replicaFileSyncHttpClientTimeout: ~
  # -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")
  logLevel: ~
  # -- Setting that allows you to specify a backup compression method.
  backupCompressionMethod: ~
  # -- Maximum number of worker threads that can concurrently run for each backup.
  backupConcurrentLimit: ~
  # -- Maximum number of worker threads that can concurrently run for each restore operation.
  restoreConcurrentLimit: ~
  # -- Setting that allows you to enable the V1 Data Engine.
  v1DataEngine: ~
  # -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments.
  v2DataEngine: ~
  # -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine.
  v2DataEngineHugepageLimit: ~
  # -- Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine.
  offlineReplicaRebuilding: ~
  # -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
  v2DataEngineGuaranteedInstanceManagerCPU: ~
  # -- Setting that allows scheduling of empty node selector volumes to any node.
  allowEmptyNodeSelectorVolume: ~
  # -- Setting that allows scheduling of empty disk selector volumes to any disk.
  allowEmptyDiskSelectorVolume: ~
  # -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses.
  allowCollectingLonghornUsageMetrics: ~
  # -- Setting that temporarily prevents all attempts to purge volume snapshots.
  disableSnapshotPurge: ~
  # -- Maximum snapshot count for a volume. The value should be between 2 to 250
  snapshotMaxCount: ~

privateRegistry:
  # -- Setting that allows you to create a private registry secret.
  createSecret: ~
  # -- URL of a private registry. When unspecified, Longhorn uses the default system registry.
  registryUrl: ~
  # -- User account used for authenticating with a private registry.
  registryUser: ~
  # -- Password for authenticating with a private registry.
  registryPasswd: ~
  # -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name.
  registrySecret: ~

longhornManager:
  log:
    # -- Format of Longhorn Manager logs. (Options: "plain", "json")
    format: plain
  # -- PriorityClass for Longhorn Manager.
  priorityClass: *defaultPriorityClassNameRef
  # -- Toleration for Longhorn Manager on nodes allowed to run Longhorn components.
  tolerations: []
  ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager.
  nodeSelector: {}
  ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
  ## and uncomment this example block
  #   label-key1: "label-value1"
  #   label-key2: "label-value2"
  # -- Annotation for the Longhorn Manager service.
  serviceAnnotations: {}
  ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
  ## and uncomment this example block
  #   annotation-key1: "annotation-value1"
  #   annotation-key2: "annotation-value2"

longhornDriver:
  log:
    # -- Format of longhorn-driver logs. (Options: "plain", "json")
    format: plain
  # -- PriorityClass for Longhorn Driver.
  priorityClass: *defaultPriorityClassNameRef
  # -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components.
  tolerations: []
  ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver.
  nodeSelector: {}
  ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  #   label-key1: "label-value1"
  #   label-key2: "label-value2"

longhornUI:
  # -- Replica count for Longhorn UI.
  replicas: 2
  # -- PriorityClass for Longhorn UI.
  priorityClass: *defaultPriorityClassNameRef
  # -- Toleration for Longhorn UI on nodes allowed to run Longhorn components.
  tolerations: []
  ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI.
  nodeSelector: {}
  ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  #   label-key1: "label-value1"
  #   label-key2: "label-value2"

ingress:
  # -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service.
  enabled: false

  # -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller.
  # ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases.
  ingressClassName: ~

  # -- Hostname of the Layer 7 load balancer.
  host: sslip.io

  # -- Setting that allows you to enable TLS on ingress records.
  tls: false

  # -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443.
  secureBackends: false

  # -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records.
  tlsSecret: longhorn.local-tls

  # -- Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}.
  path: /

  ## If you're using kube-lego, you will want to add:
  ## kubernetes.io/tls-acme: true
  ##
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
  ##
  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
  # -- Ingress annotations in the form of key-value pairs.
  annotations:
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: true

  # -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses.
  secrets:
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## -----BEGIN RSA PRIVATE KEY-----
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  # - name: longhorn.local-tls
  #   key:
  #   certificate:

# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled.
enablePSP: false

# -- Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`.
namespaceOverride: ""

# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional.
annotations: {}

serviceAccount:
  # -- Annotations to add to the service account
  annotations: {}

metrics:
  serviceMonitor:
    # -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components.
    enabled: false
    # -- Additional labels for the Prometheus ServiceMonitor resource.
    additionalLabels: {}
    # -- Annotations for the Prometheus ServiceMonitor resource.
    annotations: {}
    # -- Interval at which Prometheus scrapes the metrics from the target.
    interval: ""
    # -- Timeout after which Prometheus considers the scrape to be failed.
    scrapeTimeout: ""
    # -- Configures the relabeling rules to apply the target's metadata labels. See the [Prometheus Operator
    # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
    # formatting details.
    relabelings: []
    # -- Configures the relabeling rules to apply to the samples before ingestion. See the [Prometheus Operator
    # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
    # formatting details.
    metricRelabelings: []

## openshift settings
openshift:
  # -- Setting that allows Longhorn to integrate with OpenShift.
  enabled: false
  ui:
    # -- Route for connections between Longhorn and the OpenShift web console.
    route: "longhorn-ui"
    # -- Port for accessing the OpenShift web console.
    port: 443
    # -- Port for proxy that provides access to the OpenShift web console.
    proxy: 8443

# -- Setting that allows Longhorn to generate code coverage profiles.
enableGoCoverDir: false