Ingress: Improve port opening
Update nginx replica counts and rollout strategy.
Watch proxy-backend configmap changes and auto reload proxy nginx.
Make ingress optional.
Use <PREFIX>.<CLUSTER_NAME>.clusters.p.<DOMAIN> addresses for remote
cluster services.
Change-Id: Ideb146a8c0275822ee5cd28311c00a817f4202aa
diff --git a/apps/reload/Dockerfile b/apps/reload/Dockerfile
new file mode 100644
index 0000000..1f2e54c
--- /dev/null
+++ b/apps/reload/Dockerfile
@@ -0,0 +1,7 @@
+# FROM gcr.io/distroless/static:nonroot
+# TODO(gio): remove busybox
+FROM busybox:latest
+
+ARG TARGETARCH
+
+COPY reload_${TARGETARCH} /usr/bin/reload
diff --git a/apps/reload/Makefile b/apps/reload/Makefile
new file mode 100644
index 0000000..aeba9ff
--- /dev/null
+++ b/apps/reload/Makefile
@@ -0,0 +1,35 @@
+repo_name ?= giolekva
+podman ?= docker
+docker_flags=--provenance=false --sbom=false
+ifeq ($(podman), podman)
+manifest_dest=docker://docker.io/$(repo_name)/reload:latest
+endif
+
+clean:
+ rm -f reload
+
+build_arm64: export CGO_ENABLED=0
+build_arm64: export GO111MODULE=on
+build_arm64: export GOOS=linux
+build_arm64: export GOARCH=arm64
+build_arm64:
+ go build -o reload_arm64 *.go
+
+build_amd64: export CGO_ENABLED=0
+build_amd64: export GO111MODULE=on
+build_amd64: export GOOS=linux
+build_amd64: export GOARCH=amd64
+build_amd64:
+ go build -o reload_amd64 *.go
+
+push_arm64: clean build_arm64
+ $(podman) build --platform linux/arm64 --tag=$(repo_name)/reload:arm64 $(docker_flags) .
+ $(podman) push $(repo_name)/reload:arm64
+
+push_amd64: clean build_amd64
+ $(podman) build --platform linux/amd64 --tag=$(repo_name)/reload:amd64 $(docker_flags) .
+ $(podman) push $(repo_name)/reload:amd64
+
+push: push_arm64 push_amd64
+ $(podman) manifest create $(repo_name)/reload:latest $(repo_name)/reload:arm64 $(repo_name)/reload:amd64
+ $(podman) manifest push --purge $(repo_name)/reload:latest $(manifest_dest)
diff --git a/apps/reload/go.mod b/apps/reload/go.mod
new file mode 100644
index 0000000..a45c664
--- /dev/null
+++ b/apps/reload/go.mod
@@ -0,0 +1,3 @@
+module github.com/giolekva/pcloud/apps/reload
+
+go 1.23.1
diff --git a/apps/reload/main.go b/apps/reload/main.go
new file mode 100644
index 0000000..a06d16f
--- /dev/null
+++ b/apps/reload/main.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ "crypto/sha256"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+ "time"
+)
+
+var watch = flag.String("watch", "", "Path to watch")
+var reload = flag.String("reload", "", "Path to PID file")
+
+func check(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+func main() {
+ flag.Parse()
+ var prev string
+ for {
+ cur := func() string {
+ inp, err := os.Open(*watch)
+ check(err)
+ defer inp.Close()
+ h := sha256.New()
+ _, err = io.Copy(h, inp)
+ check(err)
+ return string(h.Sum(nil))
+ }()
+ if prev == "" {
+ prev = cur
+ continue
+ }
+ if prev != cur {
+ prev = cur
+ fmt.Println("changed")
+ pid := func() int {
+ inp, err := os.Open(*reload)
+ // TODO(gio): check error type
+ if err != nil {
+ return -1
+ }
+ defer inp.Close()
+ var ret int
+ _, err = fmt.Fscanf(inp, "%d", &ret)
+ check(err)
+ return ret
+ }()
+ if pid != -1 {
+ p, err := os.FindProcess(pid)
+ check(err)
+ fmt.Println("found process")
+ fmt.Printf("%+v\n", p)
+ // TODO(gio): take signal value from flags
+ check(p.Signal(syscall.SIGKILL))
+ fmt.Println("sent signal")
+ }
+ } else {
+ fmt.Println("no change")
+ }
+ time.Sleep(5 * time.Second)
+ }
+}
diff --git a/charts/ingress-nginx/templates/controller-service.yaml b/charts/ingress-nginx/templates/controller-service.yaml
index 2b28196..fd8598d 100644
--- a/charts/ingress-nginx/templates/controller-service.yaml
+++ b/charts/ingress-nginx/templates/controller-service.yaml
@@ -95,6 +95,18 @@
{{- end }}
{{- end }}
{{- end }}
+ {{- range $key, $value := .Values.controller.service.extraPorts.tcp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp
+ port: {{ $key }}
+ protocol: TCP
+ targetPort: {{ $value }}
+ {{- end }}
+ {{- range $key, $value := .Values.controller.service.extraPorts.udp }}
+ - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp
+ port: {{ $key }}
+ protocol: UDP
+ targetPort: {{ $value }}
+ {{- end }}
selector:
{{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: controller
diff --git a/charts/ingress-nginx/values.yaml b/charts/ingress-nginx/values.yaml
index 7ca41e7..b3389e2 100644
--- a/charts/ingress-nginx/values.yaml
+++ b/charts/ingress-nginx/values.yaml
@@ -455,6 +455,9 @@
http: http
https: https
type: LoadBalancer
+ extraPorts:
+ tcp: {}
+ udp: {}
## type: NodePort
## nodePorts:
## http: 32080
diff --git a/core/headscale/Dockerfile b/core/headscale/Dockerfile
index 5d76932..d53b665 100644
--- a/core/headscale/Dockerfile
+++ b/core/headscale/Dockerfile
@@ -1,6 +1,5 @@
-FROM headscale/headscale:0.22.3
+FROM headscale/headscale:0.25.1
ARG TARGETARCH
COPY server_${TARGETARCH} /usr/bin/headscale-api
-RUN chmod +x /usr/bin/headscale-api
diff --git a/core/installer/app_configs/app_base.cue b/core/installer/app_configs/app_base.cue
index e64d6d5..8a85417 100644
--- a/core/installer/app_configs/app_base.cue
+++ b/core/installer/app_configs/app_base.cue
@@ -768,7 +768,7 @@
port: input[e.name]
protocol: e.protocol
service: {
- name: "cluster-\(clusterName).devices.\(global.privateDomain)"
+ name: "ingress-\(strings.ToLower(e.protocol)).\(clusterName).cluster.\(global.privateDomain)"
port: input["\(e.name)_cluster"]
}
}]
@@ -794,7 +794,7 @@
port: input[name]
protocol: "TCP"
service: {
- name: "cluster-\(clusterName).devices.\(global.privateDomain)"
+ name: "ingress-\(strings.ToLower(e.protocol)).\(clusterName).cluster.\(global.privateDomain)"
port: input["\(e.name)_cluster"]
}
}, {
@@ -803,7 +803,7 @@
port: input[name]
protocol: "TCP"
service: {
- name: "\(global.id)-nginx-private-controller"
+ name: "proxy-backend-service"
namespace: "\(global.namespacePrefix)ingress-private"
port: input["\(e.name)_private"]
}
diff --git a/core/installer/app_configs/dodo_app.cue b/core/installer/app_configs/dodo_app.cue
index a22efe6..15fc825 100644
--- a/core/installer/app_configs/dodo_app.cue
+++ b/core/installer/app_configs/dodo_app.cue
@@ -46,6 +46,12 @@
for svc in service {
for i, e in svc.expose {
"port_service_\(svc.name)_\(i)": int @role(port)
+ if input.cluster != _|_ {
+ "port_service_\(svc.name)_\(i)_cluster": int @role(port)
+ if strings.ToLower(e.network) == "public" {
+ "port_service_\(svc.name)_\(i)_private": int @role(port)
+ }
+ }
}
}
@@ -120,7 +126,7 @@
#AppTmpl: {
name: string | *"app"
type: string
- ingress: #AppIngress // TODO(gio): make it a list
+ ingress?: #AppIngress // TODO(gio): make it a list
expose: [...#PortDomain] | *[]
rootDir: string
runConfiguration: [...#Command]
@@ -391,10 +397,10 @@
openPort: list.Concat([
[for i, e in svc.expose if e.port.name != _|_ {
for p in svc.ports if e.port.name == p.name {
- name: "port_service_app_\(i)"
+ name: "port_service_\(svc.name)_\(i)"
network: networks[strings.ToLower(e.network)]
- port: input[name] // TODO(gio): app name
- protocol: "TCP"
+ port: input[name]
+ protocol: p.protocol
service: {
name: "app-app"
port: p.value
@@ -403,18 +409,19 @@
}],
[for i, e in svc.expose if e.port.value != _|_ {
for p in svc.ports if e.port.value == p.value {
- name: "port_service_app_\(i)"
+ name: "port_service_\(svc.name)_\(i)"
network: networks[strings.ToLower(e.network)]
- port: input[name] // TODO(gio): app name
- protocol: "TCP"
+ port: input[name]
+ protocol: p.protocol
service: {
name: "app-app"
port: p.value
}
}
}]
- ])
- ingress: {
+ ])
+ ingress: {
+ if svc.ingress != _|_ {
"\(svc.name)": {
label: "App"
network: networks[strings.ToLower(svc.ingress.network)]
@@ -434,7 +441,8 @@
}
}
}
- images: {
+ }
+ images: {
app: {
repository: "giolekva"
name: "app-runner"
@@ -536,26 +544,31 @@
_vmName: "\(_appIdSanitized)-\(svc.name)-\(svc.source.branch)"
"\(svc.name)": #WithOut & {
ingress: {
- "\(svc.name)": {
- label: "App"
- network: networks[strings.ToLower(svc.ingress.network)]
- subdomain: svc.ingress.subdomain
- auth: svc.ingress.auth
- service: {
- name: _vmName
- port: name: "web"
+ if svc.ingress != _|_ {
+ {
+ "\(svc.name)": {
+ label: "App"
+ network: networks[strings.ToLower(svc.ingress.network)]
+ subdomain: svc.ingress.subdomain
+ auth: svc.ingress.auth
+ service: {
+ name: _vmName
+ port: name: "web"
+ }
}
- }
- code: {
- label: "VS Code"
- home: "/?folder=/home/\(svc.dev.username)/code"
- network: networks[strings.ToLower(svc.ingress.network)]
- subdomain: "code-\(svc.ingress.subdomain)"
- auth: enabled: false
- service: {
- name: _vmName
- port: name: _codeServerPortName
+ // TODO(gio): code should work even without svc ingress
+ code: {
+ label: "VS Code"
+ home: "/?folder=/home/\(svc.dev.username)/code"
+ network: networks[strings.ToLower(svc.ingress.network)]
+ subdomain: "code-\(svc.ingress.subdomain)"
+ auth: enabled: false
+ service: {
+ name: _vmName
+ port: name: _codeServerPortName
+ }
}
+ }
}
}
vm: {
diff --git a/core/installer/canvas-app.cue b/core/installer/canvas-app.cue
index 0f54b06..4abee5e 100644
--- a/core/installer/canvas-app.cue
+++ b/core/installer/canvas-app.cue
@@ -10,36 +10,35 @@
"type": "golang:1.24.0",
"source": {
"repository": "https://code.v1.dodo.cloud/pcloud",
- "branch": "test-canvas",
- "rootDir": "apps/canvas/server"
+ "branch": "main",
+ "rootDir": "apps/echo"
},
- "ingress": {
- "network": "private",
- "subdomain": "canvas",
- "auth": {
- "enabled": false
- }
- },
- "dev": {
- "enabled": false
- }
+ "ports": [{
+ "name": "echo",
+ "value": 9090
+ }],
+ "expose": [{
+ "network": "Public",
+ "subdomain": "echo",
+ "port": { "name": "echo" }
+ }]
}],
- "postgresql": [{
- "name": "pg",
- "size": "2Gi",
- "expose": [{
- "network": "public",
- "subdomain": "pg"
- }]
- }],
- "mongodb": [{
- "name": "moo",
- "size": "2Gi",
- "expose": [{
- "network": "public",
- "subdomain": "mo"
- }]
- }],
- "cluster": "asdc"
+ "postgresql": [{
+ "name": "pg",
+ "size": "1Gi",
+ "expose": [{
+ "network": "Public",
+ "subdomain": "pg"
+ }]
+ }],
+ "mongodb": [{
+ "name": "mo",
+ "size": "1Gi",
+ "expose": [{
+ "network": "Public",
+ "subdomain": "mo"
+ }]
+ }],
+ "cluster": "zxczxc"
}
}
diff --git a/core/installer/cluster.go b/core/installer/cluster.go
index ac7d970..a51435f 100644
--- a/core/installer/cluster.go
+++ b/core/installer/cluster.go
@@ -2,8 +2,6 @@
import (
"bytes"
- "crypto/sha256"
- "encoding/base64"
"encoding/json"
"fmt"
"io"
@@ -152,12 +150,9 @@
return "", err
}
defer w.Close()
- h := sha256.New()
- o := io.MultiWriter(w, h)
- if err := cfg.Render(o); err != nil {
+ if err := cfg.Render(w); err != nil {
return "", err
}
- hash := base64.StdEncoding.EncodeToString(h.Sum(nil))
nginxPath := filepath.Join(filepath.Dir(c.ConfigPath), "ingress-nginx.yaml")
nginx, err := func() (map[string]any, error) {
r, err := fs.Reader(nginxPath)
@@ -178,15 +173,6 @@
if err != nil {
return "", err
}
- cv := nginx["spec"].(map[string]any)["values"].(map[string]any)["controller"].(map[string]any)
- var annotations map[string]any
- if a, ok := cv["podAnnotations"]; ok {
- annotations = a.(map[string]any)
- } else {
- annotations = map[string]any{}
- cv["podAnnotations"] = annotations
- }
- annotations["dodo.cloud/hash"] = string(hash)
buf, err := yaml.Marshal(nginx)
if err != nil {
return "", err
@@ -229,12 +215,9 @@
return "", err
}
defer w.Close()
- h := sha256.New()
- o := io.MultiWriter(w, h)
- if err := cfg.Render(o); err != nil {
+ if err := cfg.Render(w); err != nil {
return "", err
}
- hash := base64.StdEncoding.EncodeToString(h.Sum(nil))
nginxPath := filepath.Join(filepath.Dir(c.ConfigPath), "ingress-nginx.yaml")
nginx, err := func() (map[string]any, error) {
r, err := fs.Reader(nginxPath)
@@ -255,15 +238,6 @@
if err != nil {
return "", err
}
- cv := nginx["spec"].(map[string]any)["values"].(map[string]any)["controller"].(map[string]any)
- var annotations map[string]any
- if a, ok := cv["podAnnotations"]; ok {
- annotations = a.(map[string]any)
- } else {
- annotations = map[string]any{}
- cv["podAnnotations"] = annotations
- }
- annotations["dodo.cloud/hash"] = string(hash)
buf, err := yaml.Marshal(nginx)
if err != nil {
return "", err
@@ -346,12 +320,9 @@
return "", err
}
defer w.Close()
- h := sha256.New()
- o := io.MultiWriter(w, h)
- if err := cfg.Render(o); err != nil {
+ if err := cfg.Render(w); err != nil {
return "", err
}
- hash := base64.StdEncoding.EncodeToString(h.Sum(nil))
nginxPath := filepath.Join(filepath.Dir(c.ConfigPath), "ingress-nginx.yaml")
nginx, err := func() (map[string]any, error) {
r, err := fs.Reader(nginxPath)
@@ -372,15 +343,6 @@
if err != nil {
return "", err
}
- cv := nginx["spec"].(map[string]any)["values"].(map[string]any)["controller"].(map[string]any)
- var annotations map[string]any
- if a, ok := cv["podAnnotations"]; ok {
- annotations = a.(map[string]any)
- } else {
- annotations = map[string]any{}
- cv["podAnnotations"] = annotations
- }
- annotations["dodo.cloud/hash"] = string(hash)
buf, err := yaml.Marshal(nginx)
if err != nil {
return "", err
@@ -420,12 +382,9 @@
return "", err
}
defer w.Close()
- h := sha256.New()
- o := io.MultiWriter(w, h)
- if err := cfg.Render(o); err != nil {
+ if err := cfg.Render(w); err != nil {
return "", err
}
- hash := base64.StdEncoding.EncodeToString(h.Sum(nil))
nginxPath := filepath.Join(filepath.Dir(c.ConfigPath), "ingress-nginx.yaml")
nginx, err := func() (map[string]any, error) {
r, err := fs.Reader(nginxPath)
@@ -446,15 +405,6 @@
if err != nil {
return "", err
}
- cv := nginx["spec"].(map[string]any)["values"].(map[string]any)["controller"].(map[string]any)
- var annotations map[string]any
- if a, ok := cv["podAnnotations"]; ok {
- annotations = a.(map[string]any)
- } else {
- annotations = map[string]any{}
- cv["podAnnotations"] = annotations
- }
- annotations["dodo.cloud/hash"] = string(hash)
buf, err := yaml.Marshal(nginx)
if err != nil {
return "", err
@@ -489,6 +439,7 @@
type NginxProxyConfig struct {
Namespace string
+ PID string
IngressPort int
Resolvers []net.IP
Ingress map[string]string
@@ -541,6 +492,9 @@
if items[0] == "namespace:" {
ret.Namespace = items[1]
}
+ } else if items[0] == "pid" {
+
+ ret.PID = items[1]
} else if items[0] == "http" {
insideHttp = true
} else if insideHttp && items[0] == "map" {
@@ -624,6 +578,9 @@
const nginxConfigTmpl = ` worker_processes 1;
worker_rlimit_nofile 8192;
+ {{- if .PID }}
+ pid {{ .PID }};
+ {{- end }}
events {
worker_connections 1024;
}
@@ -655,7 +612,6 @@
{{- range $port, $upstream := .UDP }}
server {
listen {{ $port }} udp;
- resolver 100.100.100.100;
proxy_pass {{ $upstream }};
}
{{- end }}
diff --git a/core/installer/dodo_app_test.go b/core/installer/dodo_app_test.go
index 25ce4a3..1367c65 100644
--- a/core/installer/dodo_app_test.go
+++ b/core/installer/dodo_app_test.go
@@ -331,3 +331,72 @@
}
t.Log(string(r.Raw))
}
+
+const exposeSVCRemoteCluster = `
+{
+ "cluster": "remote",
+ "service": [{
+ "name": "echo",
+ "type": "golang:1.20.0",
+ "source": {
+ "repository": "ssh://foo.bar"
+ },
+ "ports": [{
+ "name": "echo",
+ "value": 9090
+ }],
+ "expose": [{
+ "port": {
+ "name": "echo"
+ },
+ "network": "Private",
+ "subdomain": "echo"
+ }]
+ }]
+}
+`
+
+func TestExposeSVCRemoteCluster(t *testing.T) {
+ var buf bytes.Buffer
+ if _, err := buf.WriteString(exposeSVCRemoteCluster); err != nil {
+ t.Fatal(err)
+ }
+ clusters := []Cluster{{
+ Name: "remote",
+ Kubeconfig: "<KUBECONFIG>",
+ IngressClassName: "<INGRESS_CLASS_NAME>",
+ }}
+ if err := json.NewEncoder(&buf).Encode(struct {
+ Clusters []Cluster `json:"clusters"`
+ }{
+ clusters,
+ }); err != nil {
+ t.Fatal(err)
+ }
+ app, err := NewDodoApp(buf.Bytes())
+ if err != nil {
+ for _, e := range errors.Errors(err) {
+ t.Log(e)
+ }
+ t.Fatal(err)
+ }
+ release := Release{
+ Namespace: "foo",
+ AppInstanceId: "foo-bar",
+ RepoAddr: "ssh://192.168.100.210:22/config",
+ AppDir: "/foo/bar",
+ }
+ keyGen := testKeyGen{}
+ r, err := app.Render(release, env, networks, clusters, map[string]any{
+ "managerAddr": "",
+ "appId": "",
+ "sshPrivateKey": "",
+ "port_service_echo_0": 1,
+ "port_service_echo_0_cluster": 2,
+ }, nil, keyGen)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Log(string(r.Raw))
+ t.Log(fmt.Sprintf("%+v", r.Ports))
+}
diff --git a/core/installer/reconcile.sh b/core/installer/reconcile.sh
index f8b3295..54aa570 100755
--- a/core/installer/reconcile.sh
+++ b/core/installer/reconcile.sh
@@ -2,4 +2,6 @@
do
flux reconcile source git -n hgrz hgrz
flux reconcile kustomization -n hgrz hgrz
+ flux reconcile source git -n dodo-flux dodo-flux
+ flux reconcile kustomization -n dodo-flux dodo-flux
done
diff --git a/core/installer/values-tmpl/ingress-public.cue b/core/installer/values-tmpl/ingress-public.cue
index fe6098e..119dd44 100644
--- a/core/installer/values-tmpl/ingress-public.cue
+++ b/core/installer/values-tmpl/ingress-public.cue
@@ -4,6 +4,7 @@
input: {
sshPrivateKey: string
+ controllerReplicaCount: int | *3
}
name: "ingress-public"
@@ -48,7 +49,7 @@
fullnameOverride: "\(global.pcloudEnvName)-ingress-public"
controller: {
kind: "Deployment"
- replicaCount: 1 // TODO(gio): configurable
+ replicaCount: input.controllerReplicaCount
topologySpreadConstraints: [{
labelSelector: {
matchLabels: {
@@ -64,7 +65,7 @@
updateStrategy: {
type: "RollingUpdate"
rollingUpdate: {
- maxSurge: "100%"
+ maxSurge: "50%"
maxUnavailable: "30%"
}
}
diff --git a/core/installer/values-tmpl/private-network.cue b/core/installer/values-tmpl/private-network.cue
index 3ea3c03..1fcf783 100644
--- a/core/installer/values-tmpl/private-network.cue
+++ b/core/installer/values-tmpl/private-network.cue
@@ -9,6 +9,7 @@
ipSubnet: string // TODO(gio): use cidr type
}
sshPrivateKey: string
+ controllerReplicaCount: int | *3
}
name: "private-network"
@@ -97,29 +98,28 @@
serviceAccountName: "\(global.id)-nginx-private"
}
}
- "headscale-user": {
- chart: charts.headscaleUser
- values: {
- resourceName: "private-network-proxy-backend"
- username: "private-network-proxy"
- headscaleApiAddress: "http://headscale-api.\(global.namespacePrefix)app-headscale.svc.cluster.local"
- preAuthKey: {
- enabled: true
- secretName: _clusterProxySecretName
- }
- }
- }
"ingress-nginx": {
chart: charts["ingress-nginx"]
values: {
fullnameOverride: "\(global.id)-nginx-private"
controller: {
+ replicaCount: input.controllerReplicaCount
+ updateStrategy: {
+ type: "RollingUpdate"
+ rollingUpdate: {
+ maxUnavailable: "30%"
+ }
+ }
service: {
enabled: true
type: "LoadBalancer"
annotations: {
"metallb.universe.tf/address-pool": _ingressPrivate
}
+ extraPorts: {
+ tcp: {}
+ udp: {}
+ }
}
ingressClassByName: true
ingressClassResource: {
@@ -143,7 +143,18 @@
configMap: {
name: _proxyBackendConfigName
}
+ }, {
+ name: "proxy-backend-pid"
+ emptyDir: {
+ size: "2Mi"
+ }
+ }, {
+ name: "ts-proxy-state"
+ emptyDir: {
+ size: "2Mi"
+ }
}]
+ shareProcessNamespace: true
extraContainers: [{
name: "proxy"
image: images.tailscale.fullNameWithTag
@@ -154,11 +165,15 @@
privileged: true
}
env: [{
- name: "TS_KUBE_SECRET"
- value: _clusterProxySecretName
+ name: "TS_STATE_DIR"
+ value: "/ts-state"
}, {
name: "TS_HOSTNAME"
- value: "cluster-proxy"
+ valueFrom: {
+ fieldRef: {
+ fieldPath: "metadata.name"
+ }
+ }
}, {
name: "TS_EXTRA_ARGS"
value: "--login-server=https://headscale.\(global.domain)"
@@ -166,6 +181,16 @@
name: "TS_USERSPACE"
value: "false"
}]
+ command: ["/bin/sh"]
+ args: [
+ "-c",
+ "TS_AUTHKEY=$(wget --post-data=\"\" -O /tmp/authkey http://headscale-api.\(global.namespacePrefix)app-headscale.svc.cluster.local/user/private-network-proxy/preauthkey > /dev/null 2>&1 && cat /tmp/authkey) /usr/local/bin/containerboot"
+ ],
+ volumeMounts: [{
+ mountPath: "/ts-state"
+ name: "ts-proxy-state"
+ readOnly: false
+ }]
}, {
name: "proxy-backend"
image: images.nginx.fullNameWithTag
@@ -179,7 +204,34 @@
name: _proxyBackendConfigName
mountPath: "/etc/nginx"
readOnly: true
+ }, {
+ name: "proxy-backend-pid"
+ mountPath: "/var/run/nginx"
+ readOnly: false
}]
+ }, {
+ name: "reload-config"
+ image: "giolekva/reload:latest"
+ imagePullPolicy: "Always"
+ command: [
+ "/usr/bin/reload",
+ "--watch=/etc/nginx/nginx.conf",
+ "--reload=/var/run/nginx/nginx.pid",
+ ]
+ volumeMounts: [{
+ name: "proxy-backend-config"
+ mountPath: "/etc/nginx"
+ readOnly: true
+ }, {
+ name: "proxy-backend-pid"
+ mountPath: "/var/run/nginx"
+ readOnly: true
+ }]
+ securityContext: {
+ capabilities: {
+ add: ["SYS_PTRACE"]
+ }
+ }
}]
admissionWebhooks: {
enabled: false
@@ -255,6 +307,7 @@
"nginx.conf": """
worker_processes 1;
worker_rlimit_nofile 8192;
+pid /var/run/nginx/nginx.pid;
events {
worker_connections 1024;
}
@@ -274,5 +327,4 @@
}
}
-_clusterProxySecretName: "cluster-proxy-preauthkey"
_proxyBackendConfigName: "proxy-backend-config"
diff --git a/core/port-allocator/main.go b/core/port-allocator/main.go
index 7795993..3696342 100644
--- a/core/port-allocator/main.go
+++ b/core/port-allocator/main.go
@@ -302,21 +302,18 @@
if c.proxyCfg == nil {
return fmt.Errorf("does not support TCP/UDP proxy")
}
- var namespace string
- var err error
switch strings.ToLower(protocol) {
case "tcp":
- if namespace, err = c.proxyCfg.AddProxy(port, dest, installer.ProtocolTCP); err != nil {
+ if _, err := c.proxyCfg.AddProxy(port, dest, installer.ProtocolTCP); err != nil {
return err
}
case "udp":
- if namespace, err = c.proxyCfg.AddProxy(port, dest, installer.ProtocolUDP); err != nil {
+ if _, err := c.proxyCfg.AddProxy(port, dest, installer.ProtocolUDP); err != nil {
return err
}
default:
return fmt.Errorf("unknown protocol: %s", protocol)
}
- dest = fmt.Sprintf("%s/proxy-backend-service:%d", namespace, port)
}
_, err := c.repo.Do(func(fs soft.RepoFS) (string, error) {
if err := c.writeState(fs); err != nil {
@@ -327,22 +324,27 @@
return "", err
}
portStr := strconv.Itoa(port)
+ var portMap map[string]any
+ base := "spec.values"
+ if r.IsRemoteProxy {
+ base = "spec.values.controller.service.extraPorts"
+ dest = portStr
+ }
switch protocol {
case "tcp":
- tcp, err := extractPorts(rel, "spec.values.tcp")
+ portMap, err = extractPorts(rel, fmt.Sprintf("%s.tcp", base))
if err != nil {
return "", err
}
- tcp[portStr] = dest
case "udp":
- udp, err := extractPorts(rel, "spec.values.udp")
+ portMap, err = extractPorts(rel, fmt.Sprintf("%s.udp", base))
if err != nil {
return "", err
}
- udp[portStr] = dest
default:
panic("MUST NOT REACH")
}
+ portMap[portStr] = dest
if err := c.writeRelease(fs, rel); err != nil {
return "", err
}