Auth: hydra deployment + basic integration with kratos/auth-ui flow
diff --git a/core/auth/hydra/Dockerfile b/core/auth/hydra/Dockerfile
new file mode 100644
index 0000000..96ec8fa
--- /dev/null
+++ b/core/auth/hydra/Dockerfile
@@ -0,0 +1,24 @@
+FROM alpine:3.14.2
+
+RUN addgroup -S ory; \
+    adduser -S ory -G ory -D -u 10000 -h /home/ory -s /sbin/nologin; \
+    chown -R ory:ory /home/ory
+
+RUN apk add --no-cache ca-certificates
+
+# Download, verify-extract and install the hydra binary, removing the archive
+# in the same layer so it never persists in an image layer.
+RUN wget https://github.com/ory/hydra/releases/download/v1.10.6/hydra_1.10.6_linux_arm64.tar.gz -O /tmp/hydra.tar.gz \
+ && tar -xf /tmp/hydra.tar.gz -C /usr/bin hydra \
+ && rm /tmp/hydra.tar.gz
+
+VOLUME /home/ory
+WORKDIR /home/ory
+
+# Declare the standard ports used by Hydra (4444 public endpoint, 4445 admin endpoint)
+EXPOSE 4444 4445
+
+USER 10000
+
+ENTRYPOINT ["hydra"]
+CMD ["serve"]
diff --git a/core/auth/hydra/Makefile b/core/auth/hydra/Makefile
new file mode 100644
index 0000000..232a7d7
--- /dev/null
+++ b/core/auth/hydra/Makefile
@@ -0,0 +1,5 @@
+image_arm64:
+	docker build --tag=giolekva/ory-hydra:latest --platform=linux/arm64 .
+
+push_arm64: image_arm64
+	docker push giolekva/ory-hydra:latest
diff --git a/core/auth/hydra/hydra.yaml b/core/auth/hydra/hydra.yaml
new file mode 100644
index 0000000..a650e39
--- /dev/null
+++ b/core/auth/hydra/hydra.yaml
@@ -0,0 +1,56 @@
+version: v1.10.6
+
+dsn: postgres://postgres:psswd@postgres:5432/hydra?sslmode=disable&max_conns=20&max_idle_conns=4
+
+serve:
+ cookies:
+ same_site_mode: None
+ public:
+ cors:
+ enabled: true
+ debug: true
+ allow_credentials: true
+ allowed_origins:
+ - https://lekva.me
+ - https://*.lekva.me
+ admin:
+ # host: localhost
+ cors:
+ allowed_origins:
+ - https://hydra.pcloud
+ tls:
+ allow_termination_from:
+ - 0.0.0.0/0
+ - 10.42.0.0/16
+ - 10.43.0.0/16
+ - 111.0.0.1/32
+ tls:
+ allow_termination_from:
+ - 0.0.0.0/0
+ - 10.42.0.0/16
+ - 10.43.0.0/16
+ - 111.0.0.1/32
+
+urls:
+ self:
+ public: https://hydra.lekva.me
+ issuer: https://hydra.lekva.me
+ consent: https://accounts-ui.lekva.me/consent
+ login: https://accounts-ui.lekva.me/login
+ logout: https://accounts-ui.lekva.me/logout
+
+secrets:
+ system:
+ - youReallyNeedToChangeThis
+
+oidc:
+ subject_identifiers:
+ supported_types:
+ - pairwise
+ - public
+ pairwise:
+ salt: youReallyNeedToChangeThis
+
+log:
+ level: trace
+ leak_sensitive_values: true
diff --git a/core/auth/hydra/install.yaml b/core/auth/hydra/install.yaml
new file mode 100644
index 0000000..b9a8d4d
--- /dev/null
+++ b/core/auth/hydra/install.yaml
@@ -0,0 +1,161 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: core-auth
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: hydra
+ namespace: core-auth
+spec:
+ type: ClusterIP
+ selector:
+ app: hydra
+ ports:
+ - name: public
+ port: 80
+ targetPort: public
+ protocol: TCP
+ - name: admin
+ port: 81
+ targetPort: admin
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress-hydra-public
+ namespace: core-auth
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-prod"
+ acme.cert-manager.io/http01-edit-in-place: "true"
+spec:
+ ingressClassName: nginx
+ tls:
+ - hosts:
+ - hydra.lekva.me
+ secretName: cert-hydra.lekva.me
+ rules:
+ - host: hydra.lekva.me
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: hydra
+ port:
+ name: public
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress-hydra-private
+ namespace: core-auth
+ annotations:
+ cert-manager.io/cluster-issuer: "selfsigned-ca"
+ acme.cert-manager.io/http01-edit-in-place: "true"
+spec:
+ ingressClassName: nginx-private
+ tls:
+ - hosts:
+ - hydra.pcloud
+ secretName: cert-hydra.pcloud
+ rules:
+ - host: hydra.pcloud
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: hydra
+ port:
+ name: admin
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: hydra
+ namespace: core-auth
+spec:
+ selector:
+ matchLabels:
+ app: hydra
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: hydra
+ spec:
+ volumes:
+ - name: config
+ configMap:
+ name: hydra
+ containers:
+ - name: hydra
+ image: giolekva/ory-hydra:latest
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: public
+ containerPort: 4444
+ protocol: TCP
+ - name: admin
+ containerPort: 4445
+ protocol: TCP
+ command:
+ - "hydra"
+ - "--config=/etc/hydra/config/hydra.yaml"
+ - "serve"
+ - "all"
+ #command: ["hydra", "serve"]
+ # resources:
+ # requests:
+ # memory: "10Mi"
+ # cpu: "10m"
+ # limits:
+ # memory: "20Mi"
+ # cpu: "100m"
+ volumeMounts:
+ - name: config
+ mountPath: /etc/hydra/config
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: hydra-migrate
+ namespace: core-auth
+spec:
+ template:
+ metadata:
+ labels:
+ app: hydra-migrate
+ spec:
+ restartPolicy: OnFailure
+ volumes:
+ - name: config
+ configMap:
+ name: hydra
+ containers:
+ - name: hydra
+ image: giolekva/ory-hydra:latest
+ imagePullPolicy: IfNotPresent
+ command:
+ - "hydra"
+ - "migrate"
+ - "sql"
+ - "-y"
+ - "postgres://postgres:psswd@postgres:5432/hydra?sslmode=disable&max_conns=20&max_idle_conns=4"
+ #command: ["hydra", "serve"]
+ # resources:
+ # requests:
+ # memory: "10Mi"
+ # cpu: "10m"
+ # limits:
+ # memory: "20Mi"
+ # cpu: "100m"
+ volumeMounts:
+ - name: config
+ mountPath: /etc/hydra/config
diff --git a/core/auth/ui/.gitignore b/core/auth/ui/.gitignore
new file mode 100644
index 0000000..254defd
--- /dev/null
+++ b/core/auth/ui/.gitignore
@@ -0,0 +1 @@
+server
diff --git a/core/auth/ui/Makefile b/core/auth/ui/Makefile
index c444c97..70bf15c 100644
--- a/core/auth/ui/Makefile
+++ b/core/auth/ui/Makefile
@@ -5,7 +5,7 @@
rm -f server
image: clean build
- docker build --tag=giolekva/auth-ui .
+ docker build --tag=giolekva/auth-ui . --platform=linux/arm64
push: image
docker push giolekva/auth-ui:latest
diff --git a/core/auth/ui/install.yaml b/core/auth/ui/install.yaml
index ec82b9a..35ee23a 100644
--- a/core/auth/ui/install.yaml
+++ b/core/auth/ui/install.yaml
@@ -60,9 +60,19 @@
labels:
app: kratos-selfservice-ui
spec:
+ volumes:
+ - name: cert
+ secret:
+ secretName: node-auth-ui-cert
+ - name: config
+ configMap:
+ name: auth-ui-lighthouse-config
+ hostAliases:
+ - ip: "111.0.0.1"
+ hostnames:
+ - "hydra.pcloud"
containers:
- name: server
- # image: giolekva/ory-kratos-selfservice-ui:latest
image: giolekva/auth-ui:latest
imagePullPolicy: Always
env:
@@ -80,3 +90,33 @@
# limits:
# memory: "20Mi"
# cpu: "100m"
+ - name: lighthouse
+ image: giolekva/nebula:latest
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add: ["NET_ADMIN"]
+ privileged: true
+ ports:
+ - name: lighthouse
+ containerPort: 4247
+ protocol: UDP
+ command: ["nebula", "--config=/etc/nebula/config/lighthouse.yaml"]
+ volumeMounts:
+ - name: cert
+ mountPath: /etc/nebula/lighthouse
+ readOnly: true
+ - name: config
+ mountPath: /etc/nebula/config
+ readOnly: true
+---
+apiVersion: lekva.me/v1
+kind: NebulaNode
+metadata:
+ name: auth-ui
+ namespace: core-auth
+spec:
+ caName: pcloud
+ caNamespace: ingress-nginx-private
+ ipCidr: "111.0.0.10/24"
+ secretName: node-auth-ui-cert
diff --git a/core/auth/ui/lighthouse.yaml b/core/auth/ui/lighthouse.yaml
new file mode 100644
index 0000000..7b6ad10
--- /dev/null
+++ b/core/auth/ui/lighthouse.yaml
@@ -0,0 +1,246 @@
+# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
+# Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
+
+# PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
+pki:
+ # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
+ ##ca: /etc/nebula/ca/ca.crt
+ ca: /etc/nebula/lighthouse/ca.crt
+ cert: /etc/nebula/lighthouse/host.crt
+ key: /etc/nebula/lighthouse/host.key
+ #blocklist is a list of certificate fingerprints that we will refuse to talk to
+ #blocklist:
+ # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
+
+# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
+# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
+# The syntax is:
+# "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
+# Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4242:
+static_host_map:
+ "111.0.0.1": ["46.49.35.44:4242"]
+
+
+lighthouse:
+ # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
+ # you have configured to be lighthouses in your network
+ am_lighthouse: false
+ # serve_dns optionally starts a dns listener that responds to various queries and can even be
+ # delegated to for resolution
+ #serve_dns: false
+ #dns:
+ # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
+ #host: 0.0.0.0
+ #port: 53
+ # interval is the number of seconds between updates from this node to a lighthouse.
+ # during updates, a node sends information about its current IP addresses to each node.
+ interval: 60
+ # hosts is a list of lighthouse hosts this node should report to and query from
+ # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
+ # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
+ hosts:
+ - "111.0.0.1"
+
+ # remote_allow_list allows you to control ip ranges that this node will
+ # consider when handshaking to another node. By default, any remote IPs are
+ # allowed. You can provide CIDRs here with `true` to allow and `false` to
+ # deny. The most specific CIDR rule applies to each remote. If all rules are
+ # "allow", the default will be "deny", and vice-versa. If both "allow" and
+ # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
+ # default.
+ #remote_allow_list:
+ # Example to block IPs from this subnet from being used for remote IPs.
+ #"172.16.0.0/12": false
+
+ # A more complicated example, allow public IPs but only private IPs from a specific subnet
+ #"0.0.0.0/0": true
+ #"10.0.0.0/8": false
+ #"10.42.42.0/24": true
+
+ # local_allow_list allows you to filter which local IP addresses we advertise
+ # to the lighthouses. This uses the same logic as `remote_allow_list`, but
+ # additionally, you can specify an `interfaces` map of regular expressions
+ # to match against interface names. The regexp must match the entire name.
+ # All interface rules must be either true or false (and the default will be
+ # the inverse). CIDR rules are matched after interface name rules.
+ # Default is all local IP addresses.
+ #local_allow_list:
+ # Example to block tun0 and all docker interfaces.
+ #interfaces:
+ #tun0: false
+ #'docker.*': false
+ # Example to only advertise this subnet to the lighthouse.
+ #"10.0.0.0/8": true
+
+# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
+# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
+listen:
+ # To listen on both any ipv4 and ipv6 use "[::]"
+ host: "[::]"
+ port: 4247
+ # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
+ # default is 64, does not support reload
+ #batch: 64
+ # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
+ # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/rmem_default)
+ # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide
+ # max, net.core.rmem_max and net.core.wmem_max
+ #read_buffer: 10485760
+ #write_buffer: 10485760
+
+# EXPERIMENTAL: This option is currently only supported on linux and may
+# change in future minor releases.
+#
+# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
+# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
+# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
+# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
+#routines: 1
+
+punchy:
+ # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
+ punch: true
+
+ # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
+ # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
+ # Default is false
+ #respond: true
+
+ # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
+ #delay: 1s
+
+# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
+# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
+cipher: chachapoly
+
+# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
+# path to a network adjacent nebula node.
+#local_range: "172.16.0.0/24"
+
+# sshd can expose informational and administrative functions via ssh this is a
+#sshd:
+ # Toggles the feature
+ #enabled: true
+ # Host and port to listen on, port 22 is not allowed for your safety
+ #listen: 127.0.0.1:2222
+ # A file containing the ssh host private key to use
+ # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
+ #host_key: ./ssh_host_ed25519_key
+ # A file containing a list of authorized public keys
+ #authorized_users:
+ #- user: steeeeve
+ # keys can be an array of strings or single string
+ #keys:
+ #- "ssh public key string"
+
+# Configure the private interface. Note: addr is baked into the nebula certificate
+tun:
+ # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
+ disabled: false
+ # Name of the device
+ dev: nebula1
+ # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
+ drop_local_broadcast: false
+ # Toggles forwarding of multicast packets
+ drop_multicast: false
+ # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
+ tx_queue: 500
+ # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
+ mtu: 576
+ # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
+ routes:
+ #- mtu: 8800
+ # route: 10.0.0.0/16
+ # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
+ # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
+ # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
+ unsafe_routes:
+ #- route: 172.16.1.0/24
+ # via: 192.168.100.99
+ # mtu: 1300 #mtu will default to tun mtu if this option is not sepcified
+
+
+# TODO
+# Configure logging level
+logging:
+ # panic, fatal, error, warning, info, or debug. Default is info
+ level: info
+ # json or text formats currently available. Default is text
+ format: text
+ # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
+ #disable_timestamp: true
+ # timestamp format is specified in Go time format, see:
+ # https://golang.org/pkg/time/#pkg-constants
+ # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
+ # default when `format: text`:
+ # when TTY attached: seconds since beginning of execution
+ # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
+ # As an example, to log as RFC3339 with millisecond precision, set to:
+ #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
+
+#stats:
+ #type: graphite
+ #prefix: nebula
+ #protocol: tcp
+ #host: 127.0.0.1:9999
+ #interval: 10s
+
+ #type: prometheus
+ #listen: 127.0.0.1:8080
+ #path: /metrics
+ #namespace: prometheusns
+ #subsystem: nebula
+ #interval: 10s
+
+ # enables counter metrics for meta packets
+ # e.g.: `messages.tx.handshake`
+ # NOTE: `message.{tx,rx}.recv_error` is always emitted
+ #message_metrics: false
+
+ # enables detailed counter metrics for lighthouse packets
+ # e.g.: `lighthouse.rx.HostQuery`
+ #lighthouse_metrics: false
+
+# Handshake Manager Settings
+#handshakes:
+ # Handshakes are sent to all known addresses at each interval with a linear backoff,
+ # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
+ # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
+ #try_interval: 100ms
+ #retries: 20
+ # trigger_buffer is the size of the buffer channel for quickly sending handshakes
+ # after receiving the response for lighthouse queries
+ #trigger_buffer: 64
+
+
+# Nebula security group configuration
+firewall:
+ conntrack:
+ tcp_timeout: 12m
+ udp_timeout: 3m
+ default_timeout: 10m
+ max_connections: 100000
+
+ # The firewall is default deny. There is no way to write a deny rule.
+ # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
+ # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
+ # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
+ # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
+ # proto: `any`, `tcp`, `udp`, or `icmp`
+ # host: `any` or a literal hostname, ie `test-host`
+ # group: `any` or a literal group name, ie `default-group`
+ # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
+ # cidr: a CIDR, `0.0.0.0/0` is any.
+ # ca_name: An issuing CA name
+ # ca_sha: An issuing CA shasum
+
+ outbound:
+ # Allow all outbound traffic from this node
+ - port: any
+ proto: any
+ host: any
+
+ inbound:
+ - port: any
+ proto: any
+ host: any
diff --git a/core/auth/ui/main.go b/core/auth/ui/main.go
index 0546802..92ca885 100644
--- a/core/auth/ui/main.go
+++ b/core/auth/ui/main.go
@@ -2,6 +2,7 @@
import (
"bytes"
+ "crypto/tls"
"embed"
"encoding/json"
"errors"
@@ -14,6 +15,7 @@
"net/http"
"net/http/cookiejar"
"net/url"
+ "strings"
"github.com/gorilla/mux"
"github.com/itaysk/regogo"
@@ -172,6 +174,15 @@
}
flow, ok := r.Form["flow"]
if !ok {
+ challenge, ok := r.Form["login_challenge"]
+ if ok {
+ // TODO(giolekva): encrypt
+ http.SetCookie(w, &http.Cookie{
+ Name: "login_challenge",
+ Value: challenge[0],
+ HttpOnly: true,
+ })
+ }
http.Redirect(w, r, s.kratos+"/self-service/login/browser", http.StatusSeeOther)
return
}
@@ -303,7 +314,44 @@
for _, c := range resp.Cookies() {
http.SetCookie(w, c)
}
- http.Redirect(w, r, "/", http.StatusSeeOther)
+ if challenge, _ := r.Cookie("login_challenge"); challenge != nil {
+ username, err := getWhoAmIFromKratos(resp.Cookies())
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ req := &http.Request{
+ Method: http.MethodPut,
+ URL: &url.URL{
+ Scheme: "https",
+ Host: "hydra.pcloud",
+ Path: "/oauth2/auth/requests/login/accept",
+				RawQuery: fmt.Sprintf("login_challenge=%s", url.QueryEscape(challenge.Value)),
+ },
+ Header: map[string][]string{
+ "Content-Type": []string{"text/html"},
+ },
+ // TODO(giolekva): user stable userid instead
+ Body: io.NopCloser(strings.NewReader(fmt.Sprintf(`
+{
+ "subject": "%s",
+ "remember": true,
+ "remember_for": 3600
+}`, username))),
+ }
+ client := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ },
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ } else {
+		io.Copy(w, resp.Body); resp.Body.Close() // close to avoid leaking the connection
+ }
+ }
+ // http.Redirect(w, r, "/", http.StatusSeeOther)
}
}
diff --git a/core/auth/ui/server b/core/auth/ui/server
deleted file mode 100755
index 042afdd..0000000
--- a/core/auth/ui/server
+++ /dev/null
Binary files differ
diff --git a/core/nebula/controller/Makefile b/core/nebula/controller/Makefile
index 3f7a57e..7fa4716 100644
--- a/core/nebula/controller/Makefile
+++ b/core/nebula/controller/Makefile
@@ -6,12 +6,12 @@
./hack/generate.sh
controller: clean
- go1.16 mod tidy
- go1.16 mod vendor
- go1.16 build -o controller main.go
+ go mod tidy
+ go mod vendor
+ go build -o controller main.go
web: clean
- go1.16 build -o web web.go
+ go build -o web web.go
# image: clean build
diff --git a/scripts/homelab/installer/auth.sh b/scripts/homelab/installer/auth.sh
index 59938ad..6f1f928 100644
--- a/scripts/homelab/installer/auth.sh
+++ b/scripts/homelab/installer/auth.sh
@@ -20,7 +20,13 @@
# kubectl create configmap kratos -n core-auth --from-file=../../core/auth/kratos.yaml
# kubectl create configmap identity -n core-auth --from-file=../../core/auth/identity.schema.json
# kubectl apply -f ../../core/auth/kratos/install.yaml
-# kubectl apply -f ../../core/auth/ui/install.yaml
+
+kubectl create configmap \
+ -n core-auth \
+ auth-ui-lighthouse-config \
+ --from-file ../../core/auth/ui/lighthouse.yaml
+
+kubectl apply -f ../../core/auth/ui/install.yaml
kubectl create configmap hydra -n core-auth --from-file=../../core/auth/hydra/hydra.yaml
kubectl apply -f ../../core/auth/hydra/install.yaml