update charts
diff --git a/charts/longhorn/questions.yaml b/charts/longhorn/questions.yaml
index b4ae9de..b53b0fe 100644
--- a/charts/longhorn/questions.yaml
+++ b/charts/longhorn/questions.yaml
@@ -17,7 +17,7 @@
     label: Longhorn Manager Image Repository
     group: "Longhorn Images Settings"
   - variable: image.longhorn.manager.tag
-    default: v1.4.1
+    default: v1.5.2
     description: "Specify Longhorn Manager Image Tag"
     type: string
     label: Longhorn Manager Image Tag
@@ -29,7 +29,7 @@
     label: Longhorn Engine Image Repository
     group: "Longhorn Images Settings"
   - variable: image.longhorn.engine.tag
-    default: v1.4.1
+    default: v1.5.2
     description: "Specify Longhorn Engine Image Tag"
     type: string
     label: Longhorn Engine Image Tag
@@ -41,7 +41,7 @@
     label: Longhorn UI Image Repository
     group: "Longhorn Images Settings"
   - variable: image.longhorn.ui.tag
-    default: v1.4.1
+    default: v1.5.2
     description: "Specify Longhorn UI Image Tag"
     type: string
     label: Longhorn UI Image Tag
@@ -53,7 +53,7 @@
     label: Longhorn Instance Manager Image Repository
     group: "Longhorn Images Settings"
   - variable: image.longhorn.instanceManager.tag
-    default: v1.4.1
+    default: v1.5.2
     description: "Specify Longhorn Instance Manager Image Tag"
     type: string
     label: Longhorn Instance Manager Image Tag
@@ -65,7 +65,7 @@
     label: Longhorn Share Manager Image Repository
     group: "Longhorn Images Settings"
   - variable: image.longhorn.shareManager.tag
-    default: v1.4.1
+    default: v1.5.2
     description: "Specify Longhorn Share Manager Image Tag"
     type: string
     label: Longhorn Share Manager Image Tag
@@ -77,7 +77,7 @@
     label: Longhorn Backing Image Manager Image Repository
     group: "Longhorn Images Settings"
   - variable: image.longhorn.backingImageManager.tag
-    default: v1.4.1
+    default: v1.5.2
     description: "Specify Longhorn Backing Image Manager Image Tag"
     type: string
     label: Longhorn Backing Image Manager Image Tag
@@ -89,7 +89,7 @@
     label: Longhorn Support Bundle Kit Image Repository
     group: "Longhorn Images Settings"
   - variable: image.longhorn.supportBundleKit.tag
-    default: v0.0.17
+    default: v0.0.27
     description: "Specify Longhorn Support Bundle Manager Image Tag"
     type: string
     label: Longhorn Support Bundle Kit Image Tag
@@ -101,7 +101,7 @@
     label: Longhorn CSI Attacher Image Repository
     group: "Longhorn CSI Driver Images"
   - variable: image.csi.attacher.tag
-    default: v3.4.0
+    default: v4.2.0
     description: "Specify CSI attacher image tag. Leave blank to autodetect."
     type: string
     label: Longhorn CSI Attacher Image Tag
@@ -113,7 +113,7 @@
     label: Longhorn CSI Provisioner Image Repository
     group: "Longhorn CSI Driver Images"
   - variable: image.csi.provisioner.tag
-    default: v2.1.2
+    default: v3.4.1
     description: "Specify CSI provisioner image tag. Leave blank to autodetect."
     type: string
     label: Longhorn CSI Provisioner Image Tag
@@ -125,7 +125,7 @@
     label: Longhorn CSI Node Driver Registrar Image Repository
     group: "Longhorn CSI Driver Images"
   - variable: image.csi.nodeDriverRegistrar.tag
-    default: v2.5.0
+    default: v2.7.0
     description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
     type: string
     label: Longhorn CSI Node Driver Registrar Image Tag
@@ -137,7 +137,7 @@
     label: Longhorn CSI Driver Resizer Image Repository
     group: "Longhorn CSI Driver Images"
   - variable: image.csi.resizer.tag
-    default: v1.3.0
+    default: v1.7.0
     description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect."
     type: string
     label: Longhorn CSI Driver Resizer Image Tag
@@ -149,7 +149,7 @@
     label: Longhorn CSI Driver Snapshotter Image Repository
     group: "Longhorn CSI Driver Images"
   - variable: image.csi.snapshotter.tag
-    default: v5.0.1
+    default: v6.2.1
     description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect."
     type: string
     label: Longhorn CSI Driver Snapshotter Image Tag
@@ -161,7 +161,7 @@
     label: Longhorn CSI Liveness Probe Image Repository
     group: "Longhorn CSI Driver Images"
   - variable: image.csi.livenessProbe.tag
-    default: v2.8.0
+    default: v2.9.0
     description: "Specify CSI liveness probe image tag. Leave blank to autodetect."
     type: string
     label: Longhorn CSI Liveness Probe Image Tag
@@ -327,6 +327,14 @@
     min: 0
     max: 100
     default: 25
+  - variable: defaultSettings.storageReservedPercentageForDefaultDisk
+    label: Storage Reserved Percentage For Default Disk
+    description: "The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node."
+    group: "Longhorn Default Settings"
+    type: int
+    min: 0
+    max: 100
+    default: 30
   - variable: defaultSettings.upgradeChecker
     label: Enable Upgrade Checker
     description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.'
@@ -439,24 +447,19 @@
     - "delete-deployment-pod"
     - "delete-both-statefulset-and-deployment-pod"
     default: "do-nothing"
-  - variable: defaultSettings.allowNodeDrainWithLastHealthyReplica
-    label: Allow Node Drain with the Last Healthy Replica
-    description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume.
-If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume."
+  - variable: defaultSettings.nodeDrainPolicy
+    label: Node Drain Policy
+    description: "Define the policy to use when a node with the last healthy replica of a volume is drained.
+- **block-if-contains-last-replica** Longhorn will block the drain when the node contains the last healthy replica of a volume.
+- **allow-if-replica-is-stopped** Longhorn will allow the drain when the node contains the last healthy replica of a volume but the replica is stopped. WARNING: possible data loss if the node is removed after draining. Select this option if you want to drain the node and do in-place upgrade/maintenance.
+- **always-allow** Longhorn will allow the drain even though the node contains the last healthy replica of a volume. WARNING: possible data loss if the node is removed after draining. Also possible data corruption if the last replica was running during the draining."
     group: "Longhorn Default Settings"
-    type: boolean
-    default: "false"
-  - variable: defaultSettings.mkfsExt4Parameters
-    label: Custom mkfs.ext4 parameters
-    description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`."
-    group: "Longhorn Default Settings"
-    type: string
-  - variable: defaultSettings.disableReplicaRebuild
-    label: Disable Replica Rebuild
-    description: "This setting disable replica rebuild cross the whole cluster, eviction and data locality feature won't work if this setting is true. But doesn't have any impact to any current replica rebuild and restore disaster recovery volume."
-    group: "Longhorn Default Settings"
-    type: boolean
-    default: "false"
+    type: enum
+    options:
+      - "block-if-contains-last-replica"
+      - "allow-if-replica-is-stopped"
+      - "always-allow"
+    default: "block-if-contains-last-replica"
   - variable: defaultSettings.replicaReplenishmentWaitInterval
     label: Replica Replenishment Wait Interval
     description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
@@ -538,42 +541,30 @@
     type: int
     min: 0
     default: 300
-  - variable: defaultSettings.guaranteedEngineManagerCPU
-    label: Guaranteed Engine Manager CPU
-    description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload.
-    In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
-    Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100.
+  - variable: defaultSettings.guaranteedInstanceManagerCPU
+    label: Guaranteed Instance Manager CPU
+    description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each instance manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each instance manager pod on this node. This will help maintain engine and replica stability during high node workload.
+    In order to prevent unexpected volume instance (engine/replica) crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
+    `Guaranteed Instance Manager CPU = The estimated max Longhorn volume engine and replica count on a node * 0.1 / The total allocatable CPUs on the node * 100`
     The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
     If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
     WARNING:
-      - Value 0 means unsetting CPU requests for engine manager pods.
-      - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40.
+      - Value 0 means unsetting CPU requests for instance manager pods.
+      - Considering the possible new instance manager pods in the further system upgrade, this integer value ranges from 0 to 40.
       - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
-      - This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set.
-      - After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
+      - This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set.
+      - After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
     group: "Longhorn Default Settings"
     type: int
     min: 0
     max: 40
     default: 12
-  - variable: defaultSettings.guaranteedReplicaManagerCPU
-    label: Guaranteed Replica Manager CPU
-    description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload.
-    In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
-    Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100.
-    The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
-    If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
-    WARNING:
-      - Value 0 means unsetting CPU requests for replica manager pods.
-      - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40.
-      - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
-      - This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set.
-      - After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
+  - variable: defaultSettings.logLevel
+    label: Log Level
+    description: "The log level Panic, Fatal, Error, Warn, Info, Debug, Trace used in longhorn manager. By default Info."
     group: "Longhorn Default Settings"
-    type: int
-    min: 0
-    max: 40
-    default: 12
+    type: string
+    default: "Info"
 - variable: defaultSettings.kubernetesClusterAutoscalerEnabled
   label: Kubernetes Cluster Autoscaler Enabled (Experimental)
   description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler.
@@ -660,6 +651,50 @@
   group: "Longhorn Default Settings"
   type: int
   default: "30"
+- variable: defaultSettings.backupCompressionMethod
+  label: Backup Compression Method
+  description: "This setting allows users to specify backup compression method.
+  Available options are
+    - **none**: Disable the compression method. Suitable for multimedia data such as encoded images and videos.
+    - **lz4**: Fast compression method. Suitable for flat files.
+    - **gzip**: A bit of higher compression ratio but relatively slow."
+  group: "Longhorn Default Settings"
+  type: string
+  default: "lz4"
+- variable: defaultSettings.backupConcurrentLimit
+  label: Backup Concurrent Limit Per Backup
+  description: "This setting controls how many worker threads run concurrently per backup."
+  group: "Longhorn Default Settings"
+  type: int
+  min: 1
+  default: 2
+- variable: defaultSettings.restoreConcurrentLimit
+  label: Restore Concurrent Limit Per Backup
+  description: "This setting controls how many worker threads run concurrently per restore."
+  group: "Longhorn Default Settings"
+  type: int
+  min: 1
+  default: 2
+- variable: defaultSettings.v2DataEngine
+  label: V2 Data Engine
+  description: "This allows users to activate v2 data engine based on SPDK. Currently, it is in the preview phase and should not be utilized in a production environment.
+  WARNING:
+    - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes.
+    - When applying the setting, Longhorn will restart all instance-manager pods.
+    - When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations."
+  group: "Longhorn V2 Data Engine (Preview Feature) Settings"
+  type: boolean
+  default: false
+- variable: defaultSettings.offlineReplicaRebuilding
+  label: Offline Replica Rebuilding
+  description: "This setting allows users to enable the offline replica rebuilding for volumes using v2 data engine."
+  group: "Longhorn V2 Data Engine (Preview Feature) Settings"
+  required: true
+  type: enum
+  options:
+  - "enabled"
+  - "disabled"
+  default: "enabled"
 - variable: persistence.defaultClass
   default: "true"
   description: "Set as default StorageClass for Longhorn"
@@ -708,18 +743,18 @@
     group: "Longhorn Storage Class Settings"
     type: string
     default:
-- variable: defaultSettings.defaultNodeSelector.enable
-  description: "Enable recurring Node selector for Longhorn StorageClass"
+- variable: persistence.defaultNodeSelector.enable
+  description: "Enable Node selector for Longhorn StorageClass"
   group: "Longhorn Storage Class Settings"
   label: Enable Storage Class Node Selector
   type: boolean
   default: false
   show_subquestion_if: true
   subquestions:
-  - variable: defaultSettings.defaultNodeSelector.selector
+  - variable: persistence.defaultNodeSelector.selector
     label: Storage Class Node Selector
-    description: 'We use NodeSelector when we want to bind PVC via StorageClass into desired mountpoint on the nodes tagged whith its value'
-    group: "Longhorn Default Settings"
+    description: 'We use NodeSelector when we want to bind PVC via StorageClass into desired mountpoint on the nodes tagged with its value'
+    group: "Longhorn Storage Class Settings"
     type: string
     default:
 - variable: persistence.backingImage.enable
@@ -835,3 +870,21 @@
   label: Rancher Windows Cluster
   type: boolean
   group: "Other Settings"
+- variable: networkPolicies.enabled
+  description: "Enable NetworkPolicies to limit access to the longhorn pods.
+  Warning: The Rancher Proxy will not work if this feature is enabled and a custom NetworkPolicy must be added."
+  group: "Other Settings"
+  label: Network Policies
+  default: "false"
+  type: boolean
+  subquestions:
+  - variable: networkPolicies.type
+    label: Network Policies for Ingress
+    description: "Create the policy to allow access for the ingress, select the distribution."
+    show_if: "networkPolicies.enabled=true&&ingress.enabled=true"
+    type: enum
+    default: "rke2"
+    options:
+      - "rke1"
+      - "rke2"
+      - "k3s"