Run Nebula lighthouse on every infrastructure node
diff --git a/scripts/homelab/installer/ingress-nginx.sh b/scripts/homelab/installer/ingress-nginx.sh
index 65a45aa..248384d 100644
--- a/scripts/homelab/installer/ingress-nginx.sh
+++ b/scripts/homelab/installer/ingress-nginx.sh
@@ -1,72 +1,75 @@
#!/bin/sh
-helm upgrade --create-namespace \
- --namespace ingress-nginx \
- nginx ingress-nginx/ingress-nginx \
- --version 4.0.3 \
- --set fullNameOverride=nginx \
- --set controller.service.type=LoadBalancer \
- --set controller.ingressClassByName=true \
- --set controller.ingressClassResource.name=nginx \
- --set controller.ingressClassResource.enabled=true \
- --set controller.ingressClassResource.default=true \
- --set controller.ingressClassResource.controllerValue="k8s.io/ingress-nginx" \
- --set controller.extraArgs.default-ssl-certificate=ingress-nginx/cert-wildcard.lekva.me \
- --set controller.config.proxy-body-size="100M" \
- --set tcp.25="app-maddy/maddy:25" \
- --set tcp.143="app-maddy/maddy:143" \
- --set tcp.993="app-maddy/maddy:993" \
- --set tcp.587="app-maddy/maddy:587" \
- --set tcp.465="app-maddy/maddy:465"
+# helm upgrade --create-namespace \
+# --namespace ingress-nginx \
+# nginx ingress-nginx/ingress-nginx \
+# --version 4.0.3 \
+#	  --set fullnameOverride=nginx \
+# --set controller.service.type=LoadBalancer \
+# --set controller.ingressClassByName=true \
+# --set controller.ingressClassResource.name=nginx \
+# --set controller.ingressClassResource.enabled=true \
+# --set controller.ingressClassResource.default=true \
+# --set controller.ingressClassResource.controllerValue="k8s.io/ingress-nginx" \
+# --set controller.extraArgs.default-ssl-certificate=ingress-nginx/cert-wildcard.lekva.me \
+# --set controller.config.proxy-body-size="100M" \
+# --set tcp.25="app-maddy/maddy:25" \
+# --set tcp.143="app-maddy/maddy:143" \
+# --set tcp.993="app-maddy/maddy:993" \
+# --set tcp.587="app-maddy/maddy:587" \
+# --set tcp.465="app-maddy/maddy:465"
# # --set udp.4242="ingress-nginx-private/lighthouse:4242"
# kubectl create configmap \
# -n ingress-nginx-private \
-# lighthouse-cert \
-# --from-file ../../apps/nebula/lighthouse-cert/
-# kubectl create configmap \
-# -n ingress-nginx-private \
-# ca-cert \
-# --from-file ../../apps/nebula/ca-cert/ca.crt
-# kubectl create configmap \
-# -n ingress-nginx-private \
# lighthouse-config \
# --from-file ../../apps/nebula/lighthouse.yaml
+kubectl create configmap \
+ -n ingress-nginx-private \
+ nodes-lighthouse-config \
+ --from-file installer/nodes-lighthouse.yaml
-helm upgrade --create-namespace \
- --namespace ingress-nginx-private \
- nginx ingress-nginx/ingress-nginx \
- --version 4.0.3 \
- --set fullnameOverride=nginx-private \
- --set controller.service.type=ClusterIP \
- --set controller.ingressClassByName=true \
- --set controller.ingressClassResource.name=nginx-private \
- --set controller.ingressClassResource.enabled=true \
- --set controller.ingressClassResource.default=false \
- --set controller.ingressClassResource.controllerValue="k8s.io/ingress-nginx-private" \
- --set controller.extraVolumes[0].name="lighthouse-cert" \
- --set controller.extraVolumes[0].configMap.name="lighthouse-cert" \
- --set controller.extraVolumes[1].name=ca-cert \
- --set controller.extraVolumes[1].configMap.name=ca-cert \
- --set controller.extraVolumes[2].name=config \
- --set controller.extraVolumes[2].configMap.name=lighthouse-config \
- --set controller.extraContainers[0].name=nebula \
- --set controller.extraContainers[0].image=giolekva/nebula:latest \
- --set controller.extraContainers[0].imagePullPolicy=IfNotPresent \
- --set controller.extraContainers[0].securityContext.capabilities.add[0]=NET_ADMIN \
- --set controller.extraContainers[0].securityContext.privileged=true \
- --set controller.extraContainers[0].ports[0].name=nebula \
- --set controller.extraContainers[0].ports[0].containerPort=4242 \
- --set controller.extraContainers[0].ports[0].protocol=UDP \
- --set controller.extraContainers[0].command[0]="nebula" \
- --set controller.extraContainers[0].command[1]="--config=/etc/nebula/config/lighthouse.yaml" \
- --set controller.extraContainers[0].volumeMounts[0].name=lighthouse-cert \
- --set controller.extraContainers[0].volumeMounts[0].mountPath=/etc/nebula/lighthouse \
- --set controller.extraContainers[0].volumeMounts[1].name=ca-cert \
- --set controller.extraContainers[0].volumeMounts[1].mountPath=/etc/nebula/ca \
- --set controller.extraContainers[0].volumeMounts[2].name=config \
- --set controller.extraContainers[0].volumeMounts[2].mountPath=/etc/nebula/config \
- --set controller.config.bind-address="111.0.0.1" \
- --set controller.config.proxy-body-size="0" \
- --set udp.53="pihole/pihole-dns-udp:53" \
- --set tcp.53="pihole/pihole-dns-tcp:53"
+kubectl apply -f installer/nodes-infrastructure.yaml
+
+
+# kubectl apply -f installer/lighthouse-node.yaml
+
+# helm upgrade --create-namespace \
+# --namespace ingress-nginx-private \
+# nginx ingress-nginx/ingress-nginx \
+# --version 4.0.3 \
+# --set fullnameOverride=nginx-private \
+# --set controller.service.type=ClusterIP \
+# --set controller.ingressClassByName=true \
+# --set controller.ingressClassResource.name=nginx-private \
+# --set controller.ingressClassResource.enabled=true \
+# --set controller.ingressClassResource.default=false \
+# --set controller.ingressClassResource.controllerValue="k8s.io/ingress-nginx-private" \
+# --set controller.extraVolumes[0].name="lighthouse-cert" \
+# --set controller.extraVolumes[0].secret.secretName="node-lighthouse-cert" \
+# --set controller.extraVolumes[1].name=config \
+# --set controller.extraVolumes[1].configMap.name=lighthouse-config \
+# --set controller.extraContainers[0].name=lighthouse \
+# --set controller.extraContainers[0].image=giolekva/nebula:latest \
+# --set controller.extraContainers[0].imagePullPolicy=IfNotPresent \
+# --set controller.extraContainers[0].securityContext.capabilities.add[0]=NET_ADMIN \
+# --set controller.extraContainers[0].securityContext.privileged=true \
+# --set controller.extraContainers[0].ports[0].name=nebula \
+# --set controller.extraContainers[0].ports[0].containerPort=4242 \
+# --set controller.extraContainers[0].ports[0].protocol=UDP \
+# --set controller.extraContainers[0].command[0]="nebula" \
+# --set controller.extraContainers[0].command[1]="--config=/etc/nebula/config/lighthouse.yaml" \
+# --set controller.extraContainers[0].volumeMounts[0].name=lighthouse-cert \
+# --set controller.extraContainers[0].volumeMounts[0].mountPath=/etc/nebula/lighthouse \
+# --set controller.extraContainers[0].volumeMounts[1].name=config \
+# --set controller.extraContainers[0].volumeMounts[1].mountPath=/etc/nebula/config \
+# --set controller.config.bind-address="111.0.0.1" \
+# --set controller.config.proxy-body-size="0" \
+# --set udp.53="pihole/pihole-dns-udp:53" \
+# --set tcp.53="pihole/pihole-dns-tcp:53"
+
+ # # --set controller.extraVolumes[1].name=ca-cert \
+ # # --set controller.extraVolumes[1].configMap.name=ca-cert \
+
+ # # --set controller.extraContainers[0].volumeMounts[1].name=ca-cert \
+ # # --set controller.extraContainers[0].volumeMounts[1].mountPath=/etc/nebula/ca \
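Note: the kubectl create configmap call above is not idempotent and fails on re-runs once the ConfigMap exists. A minimal sketch of the usual dry-run-into-apply pattern, using the same names as the script:

    kubectl create configmap \
        -n ingress-nginx-private \
        nodes-lighthouse-config \
        --from-file installer/nodes-lighthouse.yaml \
        --dry-run=client -o yaml | kubectl apply -f -

With apply, re-running the installer also picks up edits to nodes-lighthouse.yaml instead of erroring out.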
diff --git a/scripts/homelab/installer/lighthouse-node.yaml b/scripts/homelab/installer/lighthouse-node.yaml
new file mode 100644
index 0000000..c286f91
--- /dev/null
+++ b/scripts/homelab/installer/lighthouse-node.yaml
@@ -0,0 +1,18 @@
+apiVersion: lekva.me/v1
+kind: NebulaCA
+metadata:
+ name: pcloud
+ namespace: ingress-nginx-private
+spec:
+ secretName: ca-pcloud-cert
+---
+apiVersion: lekva.me/v1
+kind: NebulaNode
+metadata:
+ name: lighthouse
+ namespace: ingress-nginx-private
+spec:
+ caName: pcloud
+ caNamespace: ingress-nginx-private
+ ipCidr: "111.0.0.1/24"
+ secretName: node-lighthouse-cert
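NebulaCA and NebulaNode are custom resources in the lekva.me/v1 group; the check below assumes their controller writes the issued credentials into the named Secret under the same key names the pki section of nodes-lighthouse.yaml mounts (ca.crt, host.crt, host.key). To inspect the certificate issued for the lighthouse:

    kubectl get secret -n ingress-nginx-private node-lighthouse-cert \
        -o jsonpath='{.data.host\.crt}' | base64 -d | nebula-cert print -path /dev/stdin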
diff --git a/scripts/homelab/installer/nodes-infrastructure.yaml b/scripts/homelab/installer/nodes-infrastructure.yaml
new file mode 100644
index 0000000..d40eb87
--- /dev/null
+++ b/scripts/homelab/installer/nodes-infrastructure.yaml
@@ -0,0 +1,206 @@
+apiVersion: lekva.me/v1
+kind: NebulaNode
+metadata:
+ name: rpi111
+ namespace: ingress-nginx-private
+spec:
+ caName: pcloud
+ caNamespace: ingress-nginx-private
+ ipCidr: "111.0.0.111/24"
+ secretName: node-rpi111-cert
+---
+apiVersion: lekva.me/v1
+kind: NebulaNode
+metadata:
+ name: rpi112
+ namespace: ingress-nginx-private
+spec:
+ caName: pcloud
+ caNamespace: ingress-nginx-private
+ ipCidr: "111.0.0.112/24"
+ secretName: node-rpi112-cert
+---
+apiVersion: lekva.me/v1
+kind: NebulaNode
+metadata:
+ name: rpi113
+ namespace: ingress-nginx-private
+spec:
+ caName: pcloud
+ caNamespace: ingress-nginx-private
+ ipCidr: "111.0.0.113/24"
+ secretName: node-rpi113-cert
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: lighthouse-rpi111
+ namespace: ingress-nginx-private
+spec:
+ selector:
+ matchLabels:
+ app: lighthouse-rpi111
+ template:
+ metadata:
+ labels:
+ app: lighthouse-rpi111
+ spec:
+ restartPolicy: Always
+ hostNetwork: true
+ volumes:
+ - name: cert
+ secret:
+ secretName: node-rpi111-cert
+ - name: config
+ configMap:
+ name: nodes-lighthouse-config
+ containers:
+ - name: lighthouse
+ image: giolekva/nebula:latest
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add: ["NET_ADMIN"]
+ privileged: true
+ ports:
+ - name: lighthouse
+ containerPort: 4242
+ protocol: UDP
+ command: ["nebula", "--config=/etc/nebula/config/nodes-lighthouse.yaml"]
+ volumeMounts:
+ - name: cert
+ mountPath: /etc/nebula/lighthouse
+ readOnly: true
+ - name: config
+ mountPath: /etc/nebula/config
+ readOnly: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - rpi111
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: lighthouse-rpi112
+ namespace: ingress-nginx-private
+spec:
+ selector:
+ matchLabels:
+ app: lighthouse-rpi112
+ template:
+ metadata:
+ labels:
+ app: lighthouse-rpi112
+ spec:
+ restartPolicy: Always
+ hostNetwork: true
+ volumes:
+ - name: cert
+ secret:
+ secretName: node-rpi112-cert
+ - name: config
+ configMap:
+ name: nodes-lighthouse-config
+ containers:
+ - name: lighthouse
+ image: giolekva/nebula:latest
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add: ["NET_ADMIN"]
+ privileged: true
+ ports:
+ - name: lighthouse
+ containerPort: 4242
+ protocol: UDP
+ command: ["nebula", "--config=/etc/nebula/config/nodes-lighthouse.yaml"]
+ volumeMounts:
+ - name: cert
+ mountPath: /etc/nebula/lighthouse
+ readOnly: true
+ - name: config
+ mountPath: /etc/nebula/config
+ readOnly: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - rpi112
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: lighthouse-rpi113
+ namespace: ingress-nginx-private
+spec:
+ selector:
+ matchLabels:
+ app: lighthouse-rpi113
+ template:
+ metadata:
+ labels:
+ app: lighthouse-rpi113
+ spec:
+ restartPolicy: Always
+ hostNetwork: true
+ volumes:
+ - name: cert
+ secret:
+ secretName: node-rpi113-cert
+ - name: config
+ configMap:
+ name: nodes-lighthouse-config
+ containers:
+ - name: lighthouse
+ image: giolekva/nebula:latest
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add: ["NET_ADMIN"]
+ privileged: true
+ ports:
+ - name: lighthouse
+ containerPort: 4242
+ protocol: UDP
+ command: ["nebula", "--config=/etc/nebula/config/nodes-lighthouse.yaml"]
+ volumeMounts:
+ - name: cert
+ mountPath: /etc/nebula/lighthouse
+ readOnly: true
+ - name: config
+ mountPath: /etc/nebula/config
+ readOnly: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - rpi113
+ tolerations:
+ - key: "pcloud"
+ operator: "Equal"
+ value: "role"
+ effect: "NoSchedule"
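The three DaemonSets are identical except for the node name: one DaemonSet per host is needed because each mounts its own per-node cert Secret while pinning to a single node via nodeAffinity. A hedged sketch that stamps them out of one template, where installer/node-daemonset.tmpl.yaml is a hypothetical copy of the manifest above with every rpi111 replaced by the placeholder NODE:

    #!/bin/sh
    # Hypothetical generator: emits one host-pinned DaemonSet per node.
    for node in rpi111 rpi112 rpi113; do
        sed "s/NODE/${node}/g" installer/node-daemonset.tmpl.yaml
        echo '---'
    done > installer/nodes-infrastructure.yaml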
diff --git a/scripts/homelab/installer/nodes-lighthouse.yaml b/scripts/homelab/installer/nodes-lighthouse.yaml
new file mode 100644
index 0000000..ef32754
--- /dev/null
+++ b/scripts/homelab/installer/nodes-lighthouse.yaml
@@ -0,0 +1,258 @@
+# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
+# Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
+
+# PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
+pki:
+ # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
+ ##ca: /etc/nebula/ca/ca.crt
+ ca: /etc/nebula/lighthouse/ca.crt
+ cert: /etc/nebula/lighthouse/host.crt
+ key: /etc/nebula/lighthouse/host.key
+ #blocklist is a list of certificate fingerprints that we will refuse to talk to
+ #blocklist:
+ # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
+
+# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
+# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
+# The syntax is:
+# "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
+# Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4242:
+static_host_map:
+ "111.0.0.1": ["46.49.35.44:4242"]
+
+
+lighthouse:
+ # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
+ # you have configured to be lighthouses in your network
+ am_lighthouse: false
+ # serve_dns optionally starts a dns listener that responds to various queries and can even be
+ # delegated to for resolution
+ #serve_dns: false
+ #dns:
+ # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
+ #host: 0.0.0.0
+ #port: 53
+ # interval is the number of seconds between updates from this node to a lighthouse.
+  # during updates, a node sends information about its current IP addresses to each lighthouse.
+ interval: 60
+ # hosts is a list of lighthouse hosts this node should report to and query from
+ # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
+ # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
+ hosts:
+ - "111.0.0.1"
+
+ # remote_allow_list allows you to control ip ranges that this node will
+ # consider when handshaking to another node. By default, any remote IPs are
+ # allowed. You can provide CIDRs here with `true` to allow and `false` to
+ # deny. The most specific CIDR rule applies to each remote. If all rules are
+ # "allow", the default will be "deny", and vice-versa. If both "allow" and
+ # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
+ # default.
+ #remote_allow_list:
+ # Example to block IPs from this subnet from being used for remote IPs.
+ #"172.16.0.0/12": false
+
+ # A more complicated example, allow public IPs but only private IPs from a specific subnet
+ #"0.0.0.0/0": true
+ #"10.0.0.0/8": false
+ #"10.42.42.0/24": true
+
+ # local_allow_list allows you to filter which local IP addresses we advertise
+ # to the lighthouses. This uses the same logic as `remote_allow_list`, but
+ # additionally, you can specify an `interfaces` map of regular expressions
+ # to match against interface names. The regexp must match the entire name.
+ # All interface rules must be either true or false (and the default will be
+ # the inverse). CIDR rules are matched after interface name rules.
+ # Default is all local IP addresses.
+ #local_allow_list:
+ # Example to block tun0 and all docker interfaces.
+ #interfaces:
+ #tun0: false
+ #'docker.*': false
+ # Example to only advertise this subnet to the lighthouse.
+ #"10.0.0.0/8": true
+
+# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined;
+# however, using port 0 will dynamically assign a port and is recommended for roaming nodes.
+listen:
+ # To listen on both any ipv4 and ipv6 use "[::]"
+ host: "[::]"
+ port: 4242
+ # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
+ # default is 64, does not support reload
+ #batch: 64
+ # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
+  # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/wmem_default)
+  # Maximum is limited by memory in the system; SO_RCVBUFFORCE and SO_SNDBUFFORCE are used to avoid having to raise the system-wide
+ # max, net.core.rmem_max and net.core.wmem_max
+ #read_buffer: 10485760
+ #write_buffer: 10485760
+
+# EXPERIMENTAL: This option is currently only supported on linux and may
+# change in future minor releases.
+#
+# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
+# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
+# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
+# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
+#routines: 1
+
+punchy:
+ # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
+ punch: true
+
+ # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
+ # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
+ # Default is false
+ #respond: true
+
+ # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
+ #delay: 1s
+
+# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
+# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
+cipher: chachapoly
+
+# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
+# path to a network adjacent nebula node.
+#local_range: "172.16.0.0/24"
+
+# sshd can expose informational and administrative functions via ssh.
+#sshd:
+ # Toggles the feature
+ #enabled: true
+ # Host and port to listen on, port 22 is not allowed for your safety
+ #listen: 127.0.0.1:2222
+ # A file containing the ssh host private key to use
+ # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
+ #host_key: ./ssh_host_ed25519_key
+ # A file containing a list of authorized public keys
+ #authorized_users:
+ #- user: steeeeve
+ # keys can be an array of strings or single string
+ #keys:
+ #- "ssh public key string"
+
+# Configure the private interface. Note: addr is baked into the nebula certificate
+tun:
+ # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
+ disabled: false
+ # Name of the device
+ dev: nebula1
+ # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
+ drop_local_broadcast: false
+ # Toggles forwarding of multicast packets
+ drop_multicast: false
+ # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
+ tx_queue: 500
+ # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
+ mtu: 576
+  # Route based MTU overrides; if you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
+ routes:
+ #- mtu: 8800
+ # route: 10.0.0.0/16
+ # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
+ # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
+ # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
+ unsafe_routes:
+ #- route: 172.16.1.0/24
+ # via: 192.168.100.99
+  #  mtu: 1300 #mtu will default to tun mtu if this option is not specified
+
+
+# TODO
+# Configure logging level
+logging:
+ # panic, fatal, error, warning, info, or debug. Default is info
+ level: info
+ # json or text formats currently available. Default is text
+ format: text
+  # Disable timestamp logging. Useful when output is redirected to a logging system that already adds timestamps. Default is false
+ #disable_timestamp: true
+ # timestamp format is specified in Go time format, see:
+ # https://golang.org/pkg/time/#pkg-constants
+ # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
+ # default when `format: text`:
+ # when TTY attached: seconds since beginning of execution
+ # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
+ # As an example, to log as RFC3339 with millisecond precision, set to:
+ #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
+
+#stats:
+ #type: graphite
+ #prefix: nebula
+ #protocol: tcp
+ #host: 127.0.0.1:9999
+ #interval: 10s
+
+ #type: prometheus
+ #listen: 127.0.0.1:8080
+ #path: /metrics
+ #namespace: prometheusns
+ #subsystem: nebula
+ #interval: 10s
+
+ # enables counter metrics for meta packets
+ # e.g.: `messages.tx.handshake`
+ # NOTE: `message.{tx,rx}.recv_error` is always emitted
+ #message_metrics: false
+
+ # enables detailed counter metrics for lighthouse packets
+ # e.g.: `lighthouse.rx.HostQuery`
+ #lighthouse_metrics: false
+
+# Handshake Manager Settings
+#handshakes:
+ # Handshakes are sent to all known addresses at each interval with a linear backoff,
+ # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
+ # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
+ #try_interval: 100ms
+ #retries: 20
+ # trigger_buffer is the size of the buffer channel for quickly sending handshakes
+ # after receiving the response for lighthouse queries
+ #trigger_buffer: 64
+
+
+# Nebula security group configuration
+firewall:
+ conntrack:
+ tcp_timeout: 12m
+ udp_timeout: 3m
+ default_timeout: 10m
+ max_connections: 100000
+
+ # The firewall is default deny. There is no way to write a deny rule.
+ # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
+ # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
+ # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
+ # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
+ # proto: `any`, `tcp`, `udp`, or `icmp`
+ # host: `any` or a literal hostname, ie `test-host`
+ # group: `any` or a literal group name, ie `default-group`
+ # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
+ # cidr: a CIDR, `0.0.0.0/0` is any.
+ # ca_name: An issuing CA name
+ # ca_sha: An issuing CA shasum
+
+ outbound:
+ # Allow all outbound traffic from this node
+ - port: any
+ proto: any
+ host: any
+
+ inbound:
+ # # Allow icmp between any nebula hosts
+ # - port: any
+ # proto: icmp
+ # host: any
+
+ - port: any
+ proto: any
+ host: any
+
+ # # Allow tcp/443 from any host with BOTH laptop and home group
+ # - port: 443
+ # proto: tcp
+ # groups:
+ # - laptop
+ # - home
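Once the DaemonSet pods are running with this config mounted, each node should reach the lighthouse at its nebula IP 111.0.0.1 (static_host_map resolves it to 46.49.35.44:4242). A quick connectivity check, assuming the giolekva/nebula image ships ping:

    kubectl exec -n ingress-nginx-private ds/lighthouse-rpi111 -- ping -c 3 111.0.0.1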
diff --git a/scripts/homelab/installer/pihole.sh b/scripts/homelab/installer/pihole.sh
index f7e26ba..f1f0270 100644
--- a/scripts/homelab/installer/pihole.sh
+++ b/scripts/homelab/installer/pihole.sh
@@ -3,6 +3,7 @@
helm upgrade --create-namespace \
--namespace pihole \
pihole mojo2600/pihole \
+ --version 2.4.2 \
--set image.repository="pihole/pihole" \
--set image.tag=v5.8.1 \
--set persistentVolumeClaim.enabled=true \
@@ -23,3 +24,5 @@
--set resources.limits.cpu="500m" \
--set resources.requests.memory="100M" \
--set resources.limits.memory="250M"
+
+# TODO: specify ingressClassName manually
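Until that TODO is scripted, one hedged way to set the class by hand, assuming the chart created an Ingress named pihole in the pihole namespace and that nginx-private is the intended class (both are guesses; the actual names may differ):

    kubectl patch ingress -n pihole pihole --type merge \
        -p '{"spec":{"ingressClassName":"nginx-private"}}'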