ClusterManager: Implements support of remote clusters.

After this change users will be able to:
* Create a cluster and add/remove servers to it
* Install apps on a remote cluster
* Move already installed apps between clusters
* Apps running on a server being removed will auto-migrate
  to another server in that same cluster

This is achieved by:
* Installing and running a minimal version of dodo on the remote cluster
* Ingress-nginx is installed automatically on new clusters
* Next to nginx we run a VPN client in the same pod, so that
  the default cluster can establish secure communication with it
* Multiple reverse proxies are configured to reach the
  remote cluster's services from the ingress installed on the default cluster.

Next steps:
* Support remote clusters in dodo apps (prototype ready)
* Clean up the old cluster when moving an app to a new one. Currently
  the old cluster keeps running app pods even though no ingress can
  reach them anymore.

Change-Id: Iffc908c93416d4126a8e1c2832eae7b659cb8044
diff --git a/core/headscale/main.go b/core/headscale/main.go
index 698d9d2..194da06 100644
--- a/core/headscale/main.go
+++ b/core/headscale/main.go
@@ -39,6 +39,11 @@
     },
   },
   "acls": [
+    // {
+    //   "action": "accept",
+    //   "src": ["10.42.0.0/16", "10.43.0.0/16", "135.181.48.180/32", "65.108.39.172/32"],
+    //   "dst": ["10.42.0.0/16:*", "10.43.0.0/16:*", "135.181.48.180/32:*", "65.108.39.172/32:*"],
+    // },
     {{- range .cidrs }}
     { // Everyone has passthough access to private-network-proxy node
       "action": "accept",
@@ -46,8 +51,18 @@
       "dst": ["{{ . }}:*", "private-network-proxy:0"],
     },
     {{- end }}
+    { // Everyone has access to every port of nodes owned by private-network-proxy
+      "action": "accept",
+      "src": ["*"],
+      "dst": ["private-network-proxy:*"],
+    },
+    {
+      "action": "accept",
+      "src": ["private-network-proxy"],
+      "dst": ["private-network-proxy:*"],
+    },
     {{- range .users }}
-    { // Everyone has passthough access to private-network-proxy node
+    {
       "action": "accept",
       "src": ["{{ . }}"],
       "dst": ["{{ . }}:*"],
@@ -90,6 +105,7 @@
 	r.HandleFunc("/user/{user}/preauthkey", s.createReusablePreAuthKey).Methods(http.MethodPost)
 	r.HandleFunc("/user/{user}/preauthkey", s.expireReusablePreAuthKey).Methods(http.MethodDelete)
 	r.HandleFunc("/user/{user}/node/{node}/expire", s.expireUserNode).Methods(http.MethodPost)
+	r.HandleFunc("/user/{user}/node/{node}/ip", s.getNodeIP).Methods(http.MethodGet)
 	r.HandleFunc("/user/{user}/node/{node}", s.removeUserNode).Methods(http.MethodDelete)
 	r.HandleFunc("/user", s.createUser).Methods(http.MethodPost)
 	r.HandleFunc("/routes/{id}/enable", s.enableRoute).Methods(http.MethodPost)
@@ -247,6 +263,33 @@
 	}
 }
 
+func (s *server) getNodeIP(w http.ResponseWriter, r *http.Request) { // GET /user/{user}/node/{node}/ip: writes the node's first address as plain text
+	user, ok := mux.Vars(r)["user"]
+	if !ok || user == "" { // {user} path variable is required
+		http.Error(w, "no user", http.StatusBadRequest)
+		return
+	}
+	node, ok := mux.Vars(r)["node"]
+	if !ok || node == "" { // {node} path variable is required
+		http.Error(w, "no node", http.StatusBadRequest) // was "no name" — made consistent with the "no user" branch above
+		return
+	}
+	addr, err := s.client.getNodeAddresses(user, node) // addr is a slice of addresses; only the first entry is used below
+	if err != nil {
+		if errors.Is(err, ErrorNotFound) { // unknown user/node maps to 404
+			http.Error(w, err.Error(), http.StatusNotFound)
+		} else { // any other lookup failure is a 500
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+		}
+		return
+	}
+	if len(addr) == 0 || addr[0] == nil { // node exists but has no usable address (yet)
+		http.Error(w, "no address", http.StatusPreconditionFailed)
+		return
+	}
+	fmt.Fprintf(w, "%s", addr[0].String()) // plain-text body, implicit 200 OK
+}
+
 func updateACLs(aclsPath string, cidrs []string, users []string) ([]byte, error) {
 	tmpl, err := template.New("acls").Parse(defaultACLs)
 	if err != nil {