Installer: use helmfile for installing base & user services. For now, only ingress with the VPN mesh is covered
diff --git a/apps/pihole/install.yaml b/apps/pihole/install.yaml
deleted file mode 100644
index c858d1c..0000000
--- a/apps/pihole/install.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
----
-apiVersion: v1
-kind: Service 
-metadata:
-  name: pihole
-  namespace: pihole
-spec:
-  type: ClusterIP
-  selector:
-    app: pihole
-  ports:
-    - name: http
-      port: 80
-      targetPort: 80
-      protocol: TCP
-    - name: https
-      port: 443
-      targetPort: 443
-      protocol: TCP
----
-apiVersion: v1
-kind: Service 
-metadata:
-  name: pihole-tpc
-  namespace: pihole
-spec:
-  type: LoadBalancer
-  selector:
-    app: pihole
-  ports:
-    - name: dnstcp
-      port: 53
-      targetPort: 53
-      protocol: TCP
----
-apiVersion: v1
-kind: Service 
-metadata:
-  name: pihole-udp
-  namespace: pihole
-spec:
-  type: LoadBalancer
-  selector:
-    app: pihole
-  ports:
-    - name: dnsudp
-      port: 53
-      targetPort: 53
-      protocol: UDP
-    - name: dhcp
-      port: 67
-      targetPort: 67
-      protocol: UDP
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: ingress
-  namespace: pihole
-spec:
-  entryPoints:
-    - web
-  routes:
-  - kind: Rule
-    match: Host(`pihole`)
-    services:
-    - kind: Service
-      name: pihole
-      namespace: pihole
-      passHostHeader: true
-      port: 80
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: pihole
-  namespace: pihole
-spec:
-  selector:
-    matchLabels:
-      app: pihole
-  serviceName: pihole
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: pihole
-    spec:
-      containers:
-      - name: pihole
-        image: pihole/pihole:latest
-        imagePullPolicy: Always
-        ports:
-        - containerPort: 53
-        - containerPort: 67
-        - containerPort: 80
-        - containerPort: 443
-        volumeMounts:
-        - name: pihole
-          mountPath: /etc/pihole/
-        - name: dnsmasq
-          mountPath: /etc/dnsmasq.d/
-        env:
-        - name: WEBPASSWORD
-          value: "1234"
-        - name: VIRTUAL_HOST
-          value: pihole
-        - name: TZ
-          value: "Asia/Tbilisi"
-        # Needed for DHCP  
-        # securityContext:
-        #   capabilities:
-        #     add: ["NET_ADMIN"]
-  volumeClaimTemplates:
-  - metadata:
-      name: pihole
-    spec:
-      accessModes: [ "ReadWriteOnce" ]
-      storageClassName: "local-path"
-      resources:
-        requests:
-          storage: 100Mi
-  - metadata:
-      name: dnsmasq
-    spec:
-      accessModes: [ "ReadWriteOnce" ]
-      storageClassName: "local-path"
-      resources:
-        requests:
-          storage: 100Mi
diff --git a/charts/vpn-mesh-config/.helmignore b/charts/vpn-mesh-config/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/vpn-mesh-config/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/vpn-mesh-config/Chart.yaml b/charts/vpn-mesh-config/Chart.yaml
new file mode 100644
index 0000000..cb9112a
--- /dev/null
+++ b/charts/vpn-mesh-config/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: vpn-mesh-config
+description: A Helm chart for PCloud internal VPN mesh network configuration
+type: application
+version: 0.0.1
+appVersion: "0.0.1"
diff --git a/charts/vpn-mesh-config/lighthouse.yaml b/charts/vpn-mesh-config/lighthouse.yaml
new file mode 100644
index 0000000..d2891e6
--- /dev/null
+++ b/charts/vpn-mesh-config/lighthouse.yaml
@@ -0,0 +1,246 @@
+# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
+# Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
+
+# PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
+pki:
+  # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
+  ##ca: /etc/nebula/ca/ca.crt
+  ca: /etc/nebula/lighthouse/ca.crt
+  cert: /etc/nebula/lighthouse/host.crt
+  key: /etc/nebula/lighthouse/host.key
+  #blocklist is a list of certificate fingerprints that we will refuse to talk to
+  #blocklist:
+  #  - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
+
+# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
+# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
+# The syntax is:
+#   "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
+# Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4243:
+static_host_map:
+  "<INTERNAL_IP>": ["<EXTERNAL_IP>:<PORT>"]
+
+
+lighthouse:
+  # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
+  # you have configured to be lighthouses in your network
+  am_lighthouse: true
+  # serve_dns optionally starts a dns listener that responds to various queries and can even be
+  # delegated to for resolution
+  #serve_dns: false
+  #dns:
+    # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
+    #host: 0.0.0.0
+    #port: 53
+  # interval is the number of seconds between updates from this node to a lighthouse.
+  # during updates, a node sends information about its current IP addresses to each node.
+  interval: 60
+  # hosts is a list of lighthouse hosts this node should report to and query from
+  # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
+  # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
+  hosts: []
+    # - "111.0.0.1"
+
+  # remote_allow_list allows you to control ip ranges that this node will
+  # consider when handshaking to another node. By default, any remote IPs are
+  # allowed. You can provide CIDRs here with `true` to allow and `false` to
+  # deny. The most specific CIDR rule applies to each remote. If all rules are
+  # "allow", the default will be "deny", and vice-versa. If both "allow" and
+  # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
+  # default.
+  #remote_allow_list:
+    # Example to block IPs from this subnet from being used for remote IPs.
+    #"172.16.0.0/12": false
+
+    # A more complicated example, allow public IPs but only private IPs from a specific subnet
+    #"0.0.0.0/0": true
+    #"10.0.0.0/8": false
+    #"10.42.42.0/24": true
+
+  # local_allow_list allows you to filter which local IP addresses we advertise
+  # to the lighthouses. This uses the same logic as `remote_allow_list`, but
+  # additionally, you can specify an `interfaces` map of regular expressions
+  # to match against interface names. The regexp must match the entire name.
+  # All interface rules must be either true or false (and the default will be
+  # the inverse). CIDR rules are matched after interface name rules.
+  # Default is all local IP addresses.
+  #local_allow_list:
+    # Example to block tun0 and all docker interfaces.
+    #interfaces:
+      #tun0: false
+      #'docker.*': false
+    # Example to only advertise this subnet to the lighthouse.
+    #"10.0.0.0/8": true
+
+# Port Nebula will be listening on. The default here is 4243. For a lighthouse node, the port should be defined,
+# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
+listen:
+  # To listen on both any ipv4 and ipv6 use "[::]"
+  host: "[::]"
+  port: 4243
+  # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
+  # default is 64, does not support reload
+  #batch: 64
+  # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
+  # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/rmem_default)
+  # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide
+  # max, net.core.rmem_max and net.core.wmem_max
+  #read_buffer: 10485760
+  #write_buffer: 10485760
+
+# EXPERIMENTAL: This option is currently only supported on linux and may
+# change in future minor releases.
+#
+# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
+# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
+# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
+# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
+#routines: 1
+
+punchy:
+  # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
+  punch: true
+
+  # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
+  # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
+  # Default is false
+  #respond: true
+
+  # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
+  #delay: 1s
+
+# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
+# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
+cipher: chachapoly
+
+# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
+# path to a network adjacent nebula node.
+#local_range: "172.16.0.0/24"
+
+# sshd can expose informational and administrative functions via ssh.
+#sshd:
+  # Toggles the feature
+  #enabled: true
+  # Host and port to listen on, port 22 is not allowed for your safety
+  #listen: 127.0.0.1:2222
+  # A file containing the ssh host private key to use
+  # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
+  #host_key: ./ssh_host_ed25519_key
+  # A file containing a list of authorized public keys
+  #authorized_users:
+    #- user: steeeeve
+      # keys can be an array of strings or single string
+      #keys:
+        #- "ssh public key string"
+
+# Configure the private interface. Note: addr is baked into the nebula certificate
+tun:
+  # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
+  disabled: false
+  # Name of the device
+  dev: nebula1
+  # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
+  drop_local_broadcast: false
+  # Toggles forwarding of multicast packets
+  drop_multicast: false
+  # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
+  tx_queue: 500
+  # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
+  mtu: 1300
+  # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
+  routes:
+    #- mtu: 8800
+    #  route: 10.0.0.0/16
+  # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
+  # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
+  # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
+  unsafe_routes:
+    #- route: 172.16.1.0/24
+    #  via: 192.168.100.99
+    #  mtu: 1300 #mtu will default to tun mtu if this option is not specified
+
+
+# TODO
+# Configure logging level
+logging:
+  # panic, fatal, error, warning, info, or debug. Default is info
+  level: info
+  # json or text formats currently available. Default is text
+  format: text
+  # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
+  #disable_timestamp: true
+  # timestamp format is specified in Go time format, see:
+  #     https://golang.org/pkg/time/#pkg-constants
+  # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
+  # default when `format: text`:
+  #     when TTY attached: seconds since beginning of execution
+  #     otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
+  # As an example, to log as RFC3339 with millisecond precision, set to:
+  #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
+
+#stats:
+  #type: graphite
+  #prefix: nebula
+  #protocol: tcp
+  #host: 127.0.0.1:9999
+  #interval: 10s
+
+  #type: prometheus
+  #listen: 127.0.0.1:8080
+  #path: /metrics
+  #namespace: prometheusns
+  #subsystem: nebula
+  #interval: 10s
+
+  # enables counter metrics for meta packets
+  #   e.g.: `messages.tx.handshake`
+  # NOTE: `message.{tx,rx}.recv_error` is always emitted
+  #message_metrics: false
+
+  # enables detailed counter metrics for lighthouse packets
+  #   e.g.: `lighthouse.rx.HostQuery`
+  #lighthouse_metrics: false
+
+# Handshake Manager Settings
+#handshakes:
+  # Handshakes are sent to all known addresses at each interval with a linear backoff,
+  # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
+  # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
+  #try_interval: 100ms
+  #retries: 20
+  # trigger_buffer is the size of the buffer channel for quickly sending handshakes
+  # after receiving the response for lighthouse queries
+  #trigger_buffer: 64
+
+
+# Nebula security group configuration
+firewall:
+  conntrack:
+    tcp_timeout: 12m
+    udp_timeout: 3m
+    default_timeout: 10m
+    max_connections: 100000
+
+  # The firewall is default deny. There is no way to write a deny rule.
+  # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
+  # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
+  # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
+  #   code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
+  #   proto: `any`, `tcp`, `udp`, or `icmp`
+  #   host: `any` or a literal hostname, ie `test-host`
+  #   group: `any` or a literal group name, ie `default-group`
+  #   groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
+  #   cidr: a CIDR, `0.0.0.0/0` is any.
+  #   ca_name: An issuing CA name
+  #   ca_sha: An issuing CA shasum
+
+  outbound:
+    # Allow all outbound traffic from this node
+    - port: any
+      proto: any
+      host: any
+
+  inbound:
+    - port: any
+      proto: any
+      host: any
diff --git a/charts/vpn-mesh-config/templates/certificate-authority.yaml b/charts/vpn-mesh-config/templates/certificate-authority.yaml
new file mode 100644
index 0000000..90e3f9b
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/certificate-authority.yaml
@@ -0,0 +1,9 @@
+apiVersion: lekva.me/v1
+kind: NebulaCA
+metadata:
+  name: {{ .Values.certificateAuthority.name }}
+  namespace: {{ .Release.Namespace }}
+spec:
+  secretName: {{ .Values.certificateAuthority.secretName }}
+
+
diff --git a/charts/vpn-mesh-config/templates/lighthouse-config.yaml b/charts/vpn-mesh-config/templates/lighthouse-config.yaml
new file mode 100644
index 0000000..b318546
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/lighthouse-config.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: lighthouse-config
+  namespace: {{ .Release.Namespace }}
+data:
+{{ (.Files.Glob "lighthouse.yaml").AsConfig | replace "<INTERNAL_IP>" .Values.lighthouse.internalIP | replace "<EXTERNAL_IP>" .Values.lighthouse.externalIP | replace "<PORT>" .Values.lighthouse.port | indent 2 }}
diff --git a/charts/vpn-mesh-config/templates/nebula-node.yaml b/charts/vpn-mesh-config/templates/nebula-node.yaml
new file mode 100644
index 0000000..d4bc6d2
--- /dev/null
+++ b/charts/vpn-mesh-config/templates/nebula-node.yaml
@@ -0,0 +1,10 @@
+apiVersion: lekva.me/v1
+kind: NebulaNode
+metadata:
+  name: {{ .Values.lighthouse.name }}
+  namespace: {{ .Release.Namespace }}
+spec:
+  caName: {{ .Values.certificateAuthority.name }}
+  caNamespace: {{ .Release.Namespace }}
+  ipCidr: {{ .Values.lighthouse.internalIP }}/24
+  secretName: {{ .Values.lighthouse.secretName }}
diff --git a/charts/vpn-mesh-config/values.yaml b/charts/vpn-mesh-config/values.yaml
new file mode 100644
index 0000000..25562a2
--- /dev/null
+++ b/charts/vpn-mesh-config/values.yaml
@@ -0,0 +1,10 @@
+certificateAuthority:
+  name: "nebula"
+  secretName: "ca-nebula-cert"
+
+lighthouse:
+  name: "lighthouse"
+  secretName: "node-lighthouse-cert"
+  internalIP: "0.0.0.0"
+  externalIP: "0.0.0.0"
+  port: "4242"
diff --git a/helmfile/base/helmfile.yaml b/helmfile/base/helmfile.yaml
new file mode 100644
index 0000000..d9da8e9
--- /dev/null
+++ b/helmfile/base/helmfile.yaml
@@ -0,0 +1,37 @@
+repositories:
+- name: ingress-nginx
+  url: https://kubernetes.github.io/ingress-nginx
+
+helmDefaults:
+  tillerless: true
+
+releases:
+- name: ingress-public
+  chart: ingress-nginx/ingress-nginx
+  version: 4.0.3
+  namespace: {{ .Values.id }}-ingress-public
+  createNamespace: true
+  values:
+  - fullnameOverride: {{ .Values.id }}-ingress-public
+  - controller:
+      service:
+        type: LoadBalancer
+      ingressClassByName: true
+      ingressClassResource:
+        name: {{ .Values.id }}-ingress-public
+        enabled: true
+        default: false
+        controllerValue: k8s.io/{{ .Values.id }}-ingress-public
+      config:
+        proxy-body-size: 100M
+  - tcp:
+      25: "{{ .Values.id }}-app-maddy/maddy:25"
+      143: "{{ .Values.id }}-app-maddy/maddy:143"
+      993: "{{ .Values.id }}-app-maddy/maddy:993"
+      587: "{{ .Values.id }}-app-maddy/maddy:587"
+      465: "{{ .Values.id }}-app-maddy/maddy:465"
+
+environments:
+  shveli:
+    values:
+      - id: shveli
diff --git a/helmfile/users/helmfile.yaml b/helmfile/users/helmfile.yaml
new file mode 100644
index 0000000..8953746
--- /dev/null
+++ b/helmfile/users/helmfile.yaml
@@ -0,0 +1,76 @@
+repositories:
+- name: ingress-nginx
+  url: https://kubernetes.github.io/ingress-nginx
+
+helmDefaults:
+  tillerless: true
+
+releases:
+- name: vpn-mesh-config
+  chart: ../../charts/vpn-mesh-config
+  namespace: {{ .Values.id }}-ingress-private
+  createNamespace: true
+  values:
+  - certificateAuthority:
+      name: {{ .Values.id }}
+      secretName: ca-{{ .Values.id }}-cert
+  - lighthouse:
+      internalIP: 111.0.0.1
+      externalIP: 46.49.35.44
+      port: "4243"
+- name: ingress-private
+  chart: ingress-nginx/ingress-nginx
+  version: 4.0.3
+  namespace: {{ .Values.id }}-ingress-private
+  createNamespace: true
+  values:
+  - fullnameOverride: nginx
+  - controller:
+      service:
+        type: ClusterIP
+      ingressClassByName: true
+      ingressClassResource:
+        name: {{ .Values.id }}-ingress-private
+        enabled: true
+        default: false
+        controllerValue: k8s.io/{{ .Values.id }}-ingress-private
+      extraVolumes:
+      - name: lighthouse-cert
+        secret:
+          secretName: node-lighthouse-cert
+      - name: config
+        configMap:
+          name: lighthouse-config
+      extraContainers:
+      - name: lighthouse
+        image: giolekva/nebula:latest
+        imagePullPolicy: IfNotPresent
+        securityContext:
+          privileged: true
+          capabilities:
+            add:
+            - NET_ADMIN
+        ports:
+        - name: nebula
+          containerPort: 4243
+          protocol: UDP
+        command:
+        - nebula
+        - --config=/etc/nebula/config/lighthouse.yaml
+        volumeMounts:
+        - name: lighthouse-cert
+          mountPath: /etc/nebula/lighthouse
+        - name: config
+          mountPath: /etc/nebula/config
+      config:
+        bind-address: 111.0.0.1
+        proxy-body-size: "0"
+  - udp:
+      53: "{{ .Values.id }}-app-pihole/pihole-dns-udp:53"
+  - tcp:
+      53: "{{ .Values.id }}-app-pihole/pihole-dns-tcp:53"
+
+environments:
+  shveli:
+    values:
+      - id: shveli
diff --git a/scripts/homelab/installer/ingress-nginx.sh b/scripts/homelab/installer/ingress-nginx.sh
index 7e28d53..98ba8f6 100644
--- a/scripts/homelab/installer/ingress-nginx.sh
+++ b/scripts/homelab/installer/ingress-nginx.sh
@@ -4,7 +4,7 @@
 #      --namespace ingress-nginx \
 #      nginx ingress-nginx/ingress-nginx \
 #      --version 4.0.3 \
-#      --set fullNameOverride=nginx \
+#      --set fullnameOverride=nginx \
 #      --set controller.service.type=LoadBalancer \
 #      --set controller.ingressClassByName=true \
 #      --set controller.ingressClassResource.name=nginx \
@@ -18,7 +18,6 @@
 #      --set tcp.993="app-maddy/maddy:993" \
 #      --set tcp.587="app-maddy/maddy:587" \
 #      --set tcp.465="app-maddy/maddy:465"
-# #    --set udp.4242="ingress-nginx-private/lighthouse:4242"
 
 # kubectl create configmap \
 # 	-n ingress-nginx-private \