Installer: make VPN IP configurable
diff --git a/charts/auth/templates/lighthouse-config.yaml b/charts/auth/templates/lighthouse-config.yaml
index f4444e0..1318c1a 100644
--- a/charts/auth/templates/lighthouse-config.yaml
+++ b/charts/auth/templates/lighthouse-config.yaml
@@ -5,248 +5,43 @@
namespace: {{ .Release.Namespace }}
data:
lighthouse.yaml: |
- # This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
- # Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
-
- # PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
pki:
- # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
- ##ca: /etc/nebula/ca/ca.crt
ca: /etc/nebula/lighthouse/ca.crt
cert: /etc/nebula/lighthouse/host.crt
key: /etc/nebula/lighthouse/host.key
- #blocklist is a list of certificate fingerprints that we will refuse to talk to
- #blocklist:
- # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
-
- # The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
- # A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
- # The syntax is:
- # "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
- # Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4243:
static_host_map:
- "{{ .Values.ui.nebula.lighthouse.internalIP }}": ["{{ .Values.ui.nebula.lighthouse.externalIP }}>:{{ .Values.ui.nebula.lighthouse.port }}>"]
-
-
+ "{{ .Values.ui.nebula.lighthouse.internalIP }}": ["{{ .Values.ui.nebula.lighthouse.externalIP }}:{{ .Values.ui.nebula.lighthouse.port }}"]
lighthouse:
- # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
- # you have configured to be lighthouses in your network
am_lighthouse: false
- # serve_dns optionally starts a dns listener that responds to various queries and can even be
- # delegated to for resolution
- #serve_dns: false
- #dns:
- # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
- #host: 0.0.0.0
- #port: 53
- # interval is the number of seconds between updates from this node to a lighthouse.
- # during updates, a node sends information about its current IP addresses to each node.
interval: 60
- # hosts is a list of lighthouse hosts this node should report to and query from
- # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
- # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
hosts:
- {{ .Values.ui.nebula.lighthouse.internalIP }}
-
- # remote_allow_list allows you to control ip ranges that this node will
- # consider when handshaking to another node. By default, any remote IPs are
- # allowed. You can provide CIDRs here with `true` to allow and `false` to
- # deny. The most specific CIDR rule applies to each remote. If all rules are
- # "allow", the default will be "deny", and vice-versa. If both "allow" and
- # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
- # default.
- #remote_allow_list:
- # Example to block IPs from this subnet from being used for remote IPs.
- #"172.16.0.0/12": false
-
- # A more complicated example, allow public IPs but only private IPs from a specific subnet
- #"0.0.0.0/0": true
- #"10.0.0.0/8": false
- #"10.42.42.0/24": true
-
- # local_allow_list allows you to filter which local IP addresses we advertise
- # to the lighthouses. This uses the same logic as `remote_allow_list`, but
- # additionally, you can specify an `interfaces` map of regular expressions
- # to match against interface names. The regexp must match the entire name.
- # All interface rules must be either true or false (and the default will be
- # the inverse). CIDR rules are matched after interface name rules.
- # Default is all local IP addresses.
- #local_allow_list:
- # Example to block tun0 and all docker interfaces.
- #interfaces:
- #tun0: false
- #'docker.*': false
- # Example to only advertise this subnet to the lighthouse.
- #"10.0.0.0/8": true
-
- # Port Nebula will be listening on. The default here is 4243. For a lighthouse node, the port should be defined,
- # however using port 0 will dynamically assign a port and is recommended for roaming nodes.
listen:
- # To listen on both any ipv4 and ipv6 use "[::]"
host: "[::]"
- port: 4243
- # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
- # default is 64, does not support reload
- #batch: 64
- # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
- # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/rmem_default)
- # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide
- # max, net.core.rmem_max and net.core.wmem_max
- #read_buffer: 10485760
- #write_buffer: 10485760
-
- # EXPERIMENTAL: This option is currently only supported on linux and may
- # change in future minor releases.
- #
- # Routines is the number of thread pairs to run that consume from the tun and UDP queues.
- # Currently, this defaults to 1 which means we have 1 tun queue reader and 1
- # UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
- # device and SO_REUSEPORT on the UDP socket to allow multiple queues.
- #routines: 1
-
+ port: 4242
punchy:
- # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
punch: true
-
- # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
- # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
- # Default is false
- #respond: true
-
- # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
- #delay: 1s
-
- # Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
- # IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
cipher: chachapoly
-
- # Local range is used to define a hint about the local network range, which speeds up discovering the fastest
- # path to a network adjacent nebula node.
- #local_range: "172.16.0.0/24"
-
- # sshd can expose informational and administrative functions via ssh this is a
- #sshd:
- # Toggles the feature
- #enabled: true
- # Host and port to listen on, port 22 is not allowed for your safety
- #listen: 127.0.0.1:2222
- # A file containing the ssh host private key to use
- # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
- #host_key: ./ssh_host_ed25519_key
- # A file containing a list of authorized public keys
- #authorized_users:
- #- user: steeeeve
- # keys can be an array of strings or single string
- #keys:
- #- "ssh public key string"
-
- # Configure the private interface. Note: addr is baked into the nebula certificate
tun:
- # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
disabled: false
- # Name of the device
dev: nebula1
- # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
drop_local_broadcast: false
- # Toggles forwarding of multicast packets
drop_multicast: false
- # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
tx_queue: 500
- # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
mtu: 1300
- # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
- routes:
- #- mtu: 8800
- # route: 10.0.0.0/16
- # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
- # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
- # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
- unsafe_routes:
- #- route: 172.16.1.0/24
- # via: 192.168.100.99
- # mtu: 1300 #mtu will default to tun mtu if this option is not sepcified
-
-
- # TODO
- # Configure logging level
logging:
- # panic, fatal, error, warning, info, or debug. Default is info
level: info
- # json or text formats currently available. Default is text
format: text
- # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
- #disable_timestamp: true
- # timestamp format is specified in Go time format, see:
- # https://golang.org/pkg/time/#pkg-constants
- # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # default when `format: text`:
- # when TTY attached: seconds since beginning of execution
- # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # As an example, to log as RFC3339 with millisecond precision, set to:
- #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
-
- #stats:
- #type: graphite
- #prefix: nebula
- #protocol: tcp
- #host: 127.0.0.1:9999
- #interval: 10s
-
- #type: prometheus
- #listen: 127.0.0.1:8080
- #path: /metrics
- #namespace: prometheusns
- #subsystem: nebula
- #interval: 10s
-
- # enables counter metrics for meta packets
- # e.g.: `messages.tx.handshake`
- # NOTE: `message.{tx,rx}.recv_error` is always emitted
- #message_metrics: false
-
- # enables detailed counter metrics for lighthouse packets
- # e.g.: `lighthouse.rx.HostQuery`
- #lighthouse_metrics: false
-
- # Handshake Manager Settings
- #handshakes:
- # Handshakes are sent to all known addresses at each interval with a linear backoff,
- # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
- # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
- #try_interval: 100ms
- #retries: 20
- # trigger_buffer is the size of the buffer channel for quickly sending handshakes
- # after receiving the response for lighthouse queries
- #trigger_buffer: 64
-
-
- # Nebula security group configuration
firewall:
conntrack:
tcp_timeout: 12m
udp_timeout: 3m
default_timeout: 10m
max_connections: 100000
-
- # The firewall is default deny. There is no way to write a deny rule.
- # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
- # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
- # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
- # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
- # proto: `any`, `tcp`, `udp`, or `icmp`
- # host: `any` or a literal hostname, ie `test-host`
- # group: `any` or a literal group name, ie `default-group`
- # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
- # cidr: a CIDR, `0.0.0.0/0` is any.
- # ca_name: An issuing CA name
- # ca_sha: An issuing CA shasum
-
outbound:
- # Allow all outbound traffic from this node
- port: any
proto: any
host: any
-
inbound:
- port: any
proto: any
diff --git a/charts/auth/templates/ui.yaml b/charts/auth/templates/ui.yaml
index 8936f4d..ad31ccb 100644
--- a/charts/auth/templates/ui.yaml
+++ b/charts/auth/templates/ui.yaml
@@ -50,6 +50,8 @@
metadata:
labels:
app: ui
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/lighthouse-config.yaml") . | sha256sum }}
spec:
volumes:
- name: cert
diff --git a/charts/vpn-mesh-config/lighthouse.yaml b/charts/vpn-mesh-config/lighthouse.yaml
deleted file mode 100644
index cf106b5..0000000
--- a/charts/vpn-mesh-config/lighthouse.yaml
+++ /dev/null
@@ -1,246 +0,0 @@
-# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
-# Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
-
-# PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
-pki:
- # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
- ##ca: /etc/nebula/ca/ca.crt
- ca: /etc/nebula/lighthouse/ca.crt
- cert: /etc/nebula/lighthouse/host.crt
- key: /etc/nebula/lighthouse/host.key
- #blocklist is a list of certificate fingerprints that we will refuse to talk to
- #blocklist:
- # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
-
-# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
-# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
-# The syntax is:
-# "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
-# Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4243:
-static_host_map:
- "<INTERNAL_IP>": ["<EXTERNAL_IP>:<PORT>"]
-
-
-lighthouse:
- # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
- # you have configured to be lighthouses in your network
- am_lighthouse: true
- # serve_dns optionally starts a dns listener that responds to various queries and can even be
- # delegated to for resolution
- #serve_dns: false
- #dns:
- # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
- #host: 0.0.0.0
- #port: 53
- # interval is the number of seconds between updates from this node to a lighthouse.
- # during updates, a node sends information about its current IP addresses to each node.
- interval: 60
- # hosts is a list of lighthouse hosts this node should report to and query from
- # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
- # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
- hosts:
- # - "111.0.0.1"
-
- # remote_allow_list allows you to control ip ranges that this node will
- # consider when handshaking to another node. By default, any remote IPs are
- # allowed. You can provide CIDRs here with `true` to allow and `false` to
- # deny. The most specific CIDR rule applies to each remote. If all rules are
- # "allow", the default will be "deny", and vice-versa. If both "allow" and
- # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
- # default.
- #remote_allow_list:
- # Example to block IPs from this subnet from being used for remote IPs.
- #"172.16.0.0/12": false
-
- # A more complicated example, allow public IPs but only private IPs from a specific subnet
- #"0.0.0.0/0": true
- #"10.0.0.0/8": false
- #"10.42.42.0/24": true
-
- # local_allow_list allows you to filter which local IP addresses we advertise
- # to the lighthouses. This uses the same logic as `remote_allow_list`, but
- # additionally, you can specify an `interfaces` map of regular expressions
- # to match against interface names. The regexp must match the entire name.
- # All interface rules must be either true or false (and the default will be
- # the inverse). CIDR rules are matched after interface name rules.
- # Default is all local IP addresses.
- #local_allow_list:
- # Example to block tun0 and all docker interfaces.
- #interfaces:
- #tun0: false
- #'docker.*': false
- # Example to only advertise this subnet to the lighthouse.
- #"10.0.0.0/8": true
-
-# Port Nebula will be listening on. The default here is 4243. For a lighthouse node, the port should be defined,
-# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
-listen:
- # To listen on both any ipv4 and ipv6 use "[::]"
- host: "[::]"
- port: <PORT>
- # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
- # default is 64, does not support reload
- #batch: 64
- # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
- # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/rmem_default)
- # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide
- # max, net.core.rmem_max and net.core.wmem_max
- #read_buffer: 10485760
- #write_buffer: 10485760
-
-# EXPERIMENTAL: This option is currently only supported on linux and may
-# change in future minor releases.
-#
-# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
-# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
-# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
-# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
-#routines: 1
-
-punchy:
- # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
- punch: true
-
- # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
- # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
- # Default is false
- #respond: true
-
- # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
- #delay: 1s
-
-# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
-# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
-cipher: chachapoly
-
-# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
-# path to a network adjacent nebula node.
-#local_range: "172.16.0.0/24"
-
-# sshd can expose informational and administrative functions via ssh this is a
-#sshd:
- # Toggles the feature
- #enabled: true
- # Host and port to listen on, port 22 is not allowed for your safety
- #listen: 127.0.0.1:2222
- # A file containing the ssh host private key to use
- # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
- #host_key: ./ssh_host_ed25519_key
- # A file containing a list of authorized public keys
- #authorized_users:
- #- user: steeeeve
- # keys can be an array of strings or single string
- #keys:
- #- "ssh public key string"
-
-# Configure the private interface. Note: addr is baked into the nebula certificate
-tun:
- # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
- disabled: false
- # Name of the device
- dev: nebula1
- # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
- drop_local_broadcast: false
- # Toggles forwarding of multicast packets
- drop_multicast: false
- # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
- tx_queue: 500
- # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
- mtu: 1300
- # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
- routes:
- #- mtu: 8800
- # route: 10.0.0.0/16
- # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
- # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
- # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
- unsafe_routes:
- #- route: 172.16.1.0/24
- # via: 192.168.100.99
- # mtu: 1300 #mtu will default to tun mtu if this option is not sepcified
-
-
-# TODO
-# Configure logging level
-logging:
- # panic, fatal, error, warning, info, or debug. Default is info
- level: info
- # json or text formats currently available. Default is text
- format: text
- # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
- #disable_timestamp: true
- # timestamp format is specified in Go time format, see:
- # https://golang.org/pkg/time/#pkg-constants
- # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # default when `format: text`:
- # when TTY attached: seconds since beginning of execution
- # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # As an example, to log as RFC3339 with millisecond precision, set to:
- #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
-
-#stats:
- #type: graphite
- #prefix: nebula
- #protocol: tcp
- #host: 127.0.0.1:9999
- #interval: 10s
-
- #type: prometheus
- #listen: 127.0.0.1:8080
- #path: /metrics
- #namespace: prometheusns
- #subsystem: nebula
- #interval: 10s
-
- # enables counter metrics for meta packets
- # e.g.: `messages.tx.handshake`
- # NOTE: `message.{tx,rx}.recv_error` is always emitted
- #message_metrics: false
-
- # enables detailed counter metrics for lighthouse packets
- # e.g.: `lighthouse.rx.HostQuery`
- #lighthouse_metrics: false
-
-# Handshake Manager Settings
-#handshakes:
- # Handshakes are sent to all known addresses at each interval with a linear backoff,
- # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
- # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
- #try_interval: 100ms
- #retries: 20
- # trigger_buffer is the size of the buffer channel for quickly sending handshakes
- # after receiving the response for lighthouse queries
- #trigger_buffer: 64
-
-
-# Nebula security group configuration
-firewall:
- conntrack:
- tcp_timeout: 12m
- udp_timeout: 3m
- default_timeout: 10m
- max_connections: 100000
-
- # The firewall is default deny. There is no way to write a deny rule.
- # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
- # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
- # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
- # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
- # proto: `any`, `tcp`, `udp`, or `icmp`
- # host: `any` or a literal hostname, ie `test-host`
- # group: `any` or a literal group name, ie `default-group`
- # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
- # cidr: a CIDR, `0.0.0.0/0` is any.
- # ca_name: An issuing CA name
- # ca_sha: An issuing CA shasum
-
- outbound:
- # Allow all outbound traffic from this node
- - port: any
- proto: any
- host: any
-
- inbound:
- - port: any
- proto: any
- host: any
diff --git a/charts/vpn-mesh-config/templates/lighthouse-config.yaml b/charts/vpn-mesh-config/templates/lighthouse-config.yaml
index b318546..7ce6c0a 100644
--- a/charts/vpn-mesh-config/templates/lighthouse-config.yaml
+++ b/charts/vpn-mesh-config/templates/lighthouse-config.yaml
@@ -4,4 +4,43 @@
name: lighthouse-config
namespace: {{ .Release.Namespace }}
data:
-{{ (.Files.Glob "lighthouse.yaml").AsConfig | replace "<INTERNAL_IP>" .Values.lighthouse.internalIP | replace "<EXTERNAL_IP>" .Values.lighthouse.externalIP | replace "<PORT>" .Values.lighthouse.port | indent 2 }}
+ lighthouse.yaml: |
+ pki:
+ ca: /etc/nebula/lighthouse/ca.crt
+ cert: /etc/nebula/lighthouse/host.crt
+ key: /etc/nebula/lighthouse/host.key
+ static_host_map:
+ "{{ .Values.lighthouse.internalIP }}": ["{{ .Values.lighthouse.externalIP }}:{{ .Values.lighthouse.port }}"]
+ lighthouse:
+ am_lighthouse: true
+ interval: 60
+ listen:
+ host: "[::]"
+ port: {{ .Values.lighthouse.port }}
+ punchy:
+ punch: true
+ cipher: chachapoly
+ tun:
+ disabled: false
+ dev: nebula1
+ drop_local_broadcast: false
+ drop_multicast: false
+ tx_queue: 500
+ mtu: 1300
+ logging:
+ level: info
+ format: text
+ firewall:
+ conntrack:
+ tcp_timeout: 12m
+ udp_timeout: 3m
+ default_timeout: 10m
+ max_connections: 100000
+ outbound:
+ - port: any
+ proto: any
+ host: any
+ inbound:
+ - port: any
+ proto: any
+ host: any
diff --git a/core/auth/hydra/hydra.yaml b/core/auth/hydra/hydra.yaml
deleted file mode 100644
index a650e39..0000000
--- a/core/auth/hydra/hydra.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-version: v1.10.6
-
-dsn: postgres://postgres:psswd@postgres:5432/hydra?sslmode=disable&max_conns=20&max_idle_conns=4
-
-serve:
- cookies:
- same_site_mode: None
- public:
- cors:
- enabled: true
- debug: true
- allow_credentials: true
- allowed_origins:
- - https://lekva.me
- - https://*.lekva.me
- admin:
- # host: localhost
- cors:
- allowed_origins:
- - https://hydra.pcloud
- tls:
- allow_termination_from:
- - 0.0.0.0/0
- - 10.42.0.0/16
- - 10.43.0.0/16
- - 111.0.0.1/32
- tls:
- allow_termination_from:
- - 0.0.0.0/0
- - 10.42.0.0/16
- - 10.43.0.0/16
- - 111.0.0.1/32
-
-urls:
- self:
- public: https://hydra.lekva.me
- issuer: https://hydra.lekva.me
- consent: https://accounts-ui.lekva.me/consent
- login: https://accounts-ui.lekva.me/login
- logout: https://accounts-ui.lekva.me/logout
-
-secrets:
- system:
- - youReallyNeedToChangeThis
-
-oidc:
- subject_identifiers:
- supported_types:
- - pairwise
- - public
- pairwise:
- salt: youReallyNeedToChangeThis
-
-log:
- level: trace
- leak_sensitive_values: true
diff --git a/core/auth/ui/install.yaml b/core/auth/ui/install.yaml
deleted file mode 100644
index 35ee23a..0000000
--- a/core/auth/ui/install.yaml
+++ /dev/null
@@ -1,122 +0,0 @@
----
-apiVersion: v1
-kind: Namespace
-metadata:
- name: core-auth
----
-apiVersion: v1
-kind: Service
-metadata:
- name: kratos-selfservice-ui
- namespace: core-auth
-spec:
- type: ClusterIP
- selector:
- app: kratos-selfservice-ui
- ports:
- - name: http
- port: 80
- targetPort: http
- protocol: TCP
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
- name: ingress-kratos-selfservice-ui-public
- namespace: core-auth
- annotations:
- cert-manager.io/cluster-issuer: "letsencrypt-prod"
- acme.cert-manager.io/http01-edit-in-place: "true"
-spec:
- ingressClassName: nginx
- tls:
- - hosts:
- - accounts-ui.lekva.me
- secretName: cert-accounts-ui.lekva.me
- rules:
- - host: accounts-ui.lekva.me
- http:
- paths:
- - path: /
- pathType: Prefix
- backend:
- service:
- name: kratos-selfservice-ui
- port:
- name: http
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: kratos-selfservice-ui
- namespace: core-auth
-spec:
- selector:
- matchLabels:
- app: kratos-selfservice-ui
- replicas: 1
- template:
- metadata:
- labels:
- app: kratos-selfservice-ui
- spec:
- volumes:
- - name: cert
- secret:
- secretName: node-auth-ui-cert
- - name: config
- configMap:
- name: auth-ui-lighthouse-config
- hostAliases:
- - ip: "111.0.0.1"
- hostnames:
- - "hydra.pcloud"
- containers:
- - name: server
- image: giolekva/auth-ui:latest
- imagePullPolicy: Always
- env:
- - name: KRATOS_PUBLIC_URL
- value: "https://accounts.lekva.me"
- ports:
- - name: http
- containerPort: 8080
- protocol: TCP
- command: ["server", "--port=8080"]
- # resources:
- # requests:
- # memory: "10Mi"
- # cpu: "10m"
- # limits:
- # memory: "20Mi"
- # cpu: "100m"
- - name: lighthouse
- image: giolekva/nebula:latest
- imagePullPolicy: IfNotPresent
- securityContext:
- capabilities:
- add: ["NET_ADMIN"]
- privileged: true
- ports:
- - name: lighthouse
- containerPort: 4247
- protocol: UDP
- command: ["nebula", "--config=/etc/nebula/config/lighthouse.yaml"]
- volumeMounts:
- - name: cert
- mountPath: /etc/nebula/lighthouse
- readOnly: true
- - name: config
- mountPath: /etc/nebula/config
- readOnly: true
----
-apiVersion: lekva.me/v1
-kind: NebulaNode
-metadata:
- name: auth-ui
- namespace: core-auth
-spec:
- caName: pcloud
- caNamespace: ingress-nginx-private
- ipCidr: "111.0.0.10/24"
- secretName: node-auth-ui-cert
diff --git a/core/auth/ui/lighthouse.yaml b/core/auth/ui/lighthouse.yaml
deleted file mode 100644
index 7b6ad10..0000000
--- a/core/auth/ui/lighthouse.yaml
+++ /dev/null
@@ -1,246 +0,0 @@
-# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
-# Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
-
-# PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
-pki:
- # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
- ##ca: /etc/nebula/ca/ca.crt
- ca: /etc/nebula/lighthouse/ca.crt
- cert: /etc/nebula/lighthouse/host.crt
- key: /etc/nebula/lighthouse/host.key
- #blocklist is a list of certificate fingerprints that we will refuse to talk to
- #blocklist:
- # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
-
-# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
-# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
-# The syntax is:
-# "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
-# Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4242:
-static_host_map:
- "111.0.0.1": ["46.49.35.44:4242"]
-
-
-lighthouse:
- # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
- # you have configured to be lighthouses in your network
- am_lighthouse: false
- # serve_dns optionally starts a dns listener that responds to various queries and can even be
- # delegated to for resolution
- #serve_dns: false
- #dns:
- # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
- #host: 0.0.0.0
- #port: 53
- # interval is the number of seconds between updates from this node to a lighthouse.
- # during updates, a node sends information about its current IP addresses to each node.
- interval: 60
- # hosts is a list of lighthouse hosts this node should report to and query from
- # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
- # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
- hosts:
- - "111.0.0.1"
-
- # remote_allow_list allows you to control ip ranges that this node will
- # consider when handshaking to another node. By default, any remote IPs are
- # allowed. You can provide CIDRs here with `true` to allow and `false` to
- # deny. The most specific CIDR rule applies to each remote. If all rules are
- # "allow", the default will be "deny", and vice-versa. If both "allow" and
- # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
- # default.
- #remote_allow_list:
- # Example to block IPs from this subnet from being used for remote IPs.
- #"172.16.0.0/12": false
-
- # A more complicated example, allow public IPs but only private IPs from a specific subnet
- #"0.0.0.0/0": true
- #"10.0.0.0/8": false
- #"10.42.42.0/24": true
-
- # local_allow_list allows you to filter which local IP addresses we advertise
- # to the lighthouses. This uses the same logic as `remote_allow_list`, but
- # additionally, you can specify an `interfaces` map of regular expressions
- # to match against interface names. The regexp must match the entire name.
- # All interface rules must be either true or false (and the default will be
- # the inverse). CIDR rules are matched after interface name rules.
- # Default is all local IP addresses.
- #local_allow_list:
- # Example to block tun0 and all docker interfaces.
- #interfaces:
- #tun0: false
- #'docker.*': false
- # Example to only advertise this subnet to the lighthouse.
- #"10.0.0.0/8": true
-
-# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
-# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
-listen:
- # To listen on both any ipv4 and ipv6 use "[::]"
- host: "[::]"
- port: 4247
- # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
- # default is 64, does not support reload
- #batch: 64
- # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
- # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/rmem_default)
- # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide
- # max, net.core.rmem_max and net.core.wmem_max
- #read_buffer: 10485760
- #write_buffer: 10485760
-
-# EXPERIMENTAL: This option is currently only supported on linux and may
-# change in future minor releases.
-#
-# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
-# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
-# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
-# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
-#routines: 1
-
-punchy:
- # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
- punch: true
-
- # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
- # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
- # Default is false
- #respond: true
-
- # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
- #delay: 1s
-
-# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
-# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
-cipher: chachapoly
-
-# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
-# path to a network adjacent nebula node.
-#local_range: "172.16.0.0/24"
-
-# sshd can expose informational and administrative functions via ssh this is a
-#sshd:
- # Toggles the feature
- #enabled: true
- # Host and port to listen on, port 22 is not allowed for your safety
- #listen: 127.0.0.1:2222
- # A file containing the ssh host private key to use
- # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
- #host_key: ./ssh_host_ed25519_key
- # A file containing a list of authorized public keys
- #authorized_users:
- #- user: steeeeve
- # keys can be an array of strings or single string
- #keys:
- #- "ssh public key string"
-
-# Configure the private interface. Note: addr is baked into the nebula certificate
-tun:
- # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
- disabled: false
- # Name of the device
- dev: nebula1
- # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
- drop_local_broadcast: false
- # Toggles forwarding of multicast packets
- drop_multicast: false
- # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
- tx_queue: 500
- # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
- mtu: 576
- # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
- routes:
- #- mtu: 8800
- # route: 10.0.0.0/16
- # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
- # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
- # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
- unsafe_routes:
- #- route: 172.16.1.0/24
- # via: 192.168.100.99
- # mtu: 1300 #mtu will default to tun mtu if this option is not sepcified
-
-
-# TODO
-# Configure logging level
-logging:
- # panic, fatal, error, warning, info, or debug. Default is info
- level: info
- # json or text formats currently available. Default is text
- format: text
- # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
- #disable_timestamp: true
- # timestamp format is specified in Go time format, see:
- # https://golang.org/pkg/time/#pkg-constants
- # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # default when `format: text`:
- # when TTY attached: seconds since beginning of execution
- # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # As an example, to log as RFC3339 with millisecond precision, set to:
- #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
-
-#stats:
- #type: graphite
- #prefix: nebula
- #protocol: tcp
- #host: 127.0.0.1:9999
- #interval: 10s
-
- #type: prometheus
- #listen: 127.0.0.1:8080
- #path: /metrics
- #namespace: prometheusns
- #subsystem: nebula
- #interval: 10s
-
- # enables counter metrics for meta packets
- # e.g.: `messages.tx.handshake`
- # NOTE: `message.{tx,rx}.recv_error` is always emitted
- #message_metrics: false
-
- # enables detailed counter metrics for lighthouse packets
- # e.g.: `lighthouse.rx.HostQuery`
- #lighthouse_metrics: false
-
-# Handshake Manager Settings
-#handshakes:
- # Handshakes are sent to all known addresses at each interval with a linear backoff,
- # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
- # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
- #try_interval: 100ms
- #retries: 20
- # trigger_buffer is the size of the buffer channel for quickly sending handshakes
- # after receiving the response for lighthouse queries
- #trigger_buffer: 64
-
-
-# Nebula security group configuration
-firewall:
- conntrack:
- tcp_timeout: 12m
- udp_timeout: 3m
- default_timeout: 10m
- max_connections: 100000
-
- # The firewall is default deny. There is no way to write a deny rule.
- # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
- # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
- # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
- # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
- # proto: `any`, `tcp`, `udp`, or `icmp`
- # host: `any` or a literal hostname, ie `test-host`
- # group: `any` or a literal group name, ie `default-group`
- # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
- # cidr: a CIDR, `0.0.0.0/0` is any.
- # ca_name: An issuing CA name
- # ca_sha: An issuing CA shasum
-
- outbound:
- # Allow all outbound traffic from this node
- - port: any
- proto: any
- host: any
-
- inbound:
- - port: any
- proto: any
- host: any
diff --git a/helmfile/users/helmfile.yaml b/helmfile/users/helmfile.yaml
index 1515c39..692177d 100644
--- a/helmfile/users/helmfile.yaml
+++ b/helmfile/users/helmfile.yaml
@@ -32,9 +32,9 @@
name: {{ .Values.id }}
secretName: ca-{{ .Values.id }}-cert
- lighthouse:
- internalIP: 111.0.0.1
- externalIP: 46.49.35.44
- port: "4243"
+ internalIP: {{ .Values.lighthouseMainIP }}
+ externalIP: {{ .Values.publicIP }}
+ port: "{{ .Values.lighthouseMainPort }}"
- name: ingress-private
chart: ingress-nginx/ingress-nginx
version: 4.0.3
@@ -70,7 +70,7 @@
- NET_ADMIN
ports:
- name: nebula
- containerPort: 4243
+ containerPort: {{ .Values.lighthouseMainPort }}
protocol: UDP
command:
- nebula
@@ -81,7 +81,7 @@
- name: config
mountPath: /etc/nebula/config
config:
- bind-address: 111.0.0.1
+ bind-address: {{ .Values.lighthouseMainIP }}
proxy-body-size: 0
- udp:
53: "{{ .Values.namespacePrefix }}app-pihole/pihole-dns-udp:53"
@@ -375,13 +375,13 @@
- 0.0.0.0/0
- 10.42.0.0/16
- 10.43.0.0/16
- - 111.0.0.1/32
+ - {{ .Values.lighthouseMainIP }}/32
tls:
allow_termination_from:
- 0.0.0.0/0
- 10.42.0.0/16
- 10.43.0.0/16
- - 111.0.0.1/32
+ - {{ .Values.lighthouseMainIP }}/32
urls:
self:
public: https://hydra.{{ .Values.domain }}
@@ -410,12 +410,12 @@
nebula:
lighthouse:
name: ui-lighthouse
- internalIP: 111.0.0.1
- externalIP: 46.49.35.44
- port: "4243"
+ internalIP: {{ .Values.lighthouseMainIP }}
+ externalIP: {{ .Values.publicIP }}
+ port: "{{ .Values.lighthouseMainPort }}"
node:
name: ui
- ipCidr: 111.0.0.2/24
+ ipCidr: {{ .Values.lighthouseAuthUIIP }}/24
secretName: node-ui-cert
certificateAuthority:
name: {{ .Values.id }}
@@ -557,7 +557,11 @@
- certManagerNamespace: cert-manager
- mxHostname: mail.lekva.me
- mailGatewayAddress: "tcp://maddy.pcloud-mail-gateway.svc.cluster.local:587"
- - matrixStorageSize: 1Gi
+ - matrixStorageSize: 10Gi
+ - publicIP: 46.49.35.44
+ - lighthouseMainIP: 110.0.0.1
+ - lighthouseMainPort: 4242
+ - lighthouseAuthUIIP: 110.0.0.2
lekva:
secrets:
- secrets.lekva.yaml
@@ -571,3 +575,7 @@
- mxHostname: mail.lekva.me
- mailGatewayAddress: "tcp://maddy.pcloud-mail-gateway.svc.cluster.local:587"
- matrixStorageSize: 100Gi
+ - publicIP: 46.49.35.44
+ - lighthouseMainIP: 111.0.0.1
+ - lighthouseMainPort: 4243
+ - lighthouseAuthUIIP: 111.0.0.2