ClusterManager: Implements support of remote clusters.

After this change users will be able to:
* Create a cluster and add servers to / remove servers from it
* Install apps on remote cluster
* Move already installed apps between clusters
* Apps running on a server being removed will auto-migrate
  to another server in the same cluster

This is achieved by:
* Installing and running minimal version of dodo on remote cluster
* Ingress-nginx is installed automatically on new clusters
* Next to nginx we run a VPN client in the same pod, so that
  the default cluster can establish secure communication with it
* Multiple reverse proxies are configured to reach the
  remote cluster service from the ingress installed on the default cluster.

Next steps:
* Support remote clusters in dodo apps (prototype ready)
* Clean up the old cluster when moving an app to a new one. Currently
  the old cluster keeps running app pods even though no ingress can
  reach them anymore.

Change-Id: Iffc908c93416d4126a8e1c2832eae7b659cb8044
diff --git a/core/installer/welcome/appmanager-tmpl/all-clusters.html b/core/installer/welcome/appmanager-tmpl/all-clusters.html
new file mode 100644
index 0000000..843381f
--- /dev/null
+++ b/core/installer/welcome/appmanager-tmpl/all-clusters.html
@@ -0,0 +1,21 @@
+{{ define "header" }}
+<h1>Clusters</h1>
+{{ end }}
+
+{{ define "content"}}
+<form action="/clusters" method="POST">
+	<fieldset class="grid">
+		<input type="text" name="name" placeholder="name" />
+		<button type="submit" name="create-cluster">create cluster</button>
+	</fieldset>
+</form>
+<aside>
+	<nav>
+		<ul>
+			{{ range .Clusters }}
+			<li><a href="/clusters/{{ .Name }}">{{ .Name }}</a></li>
+			{{ end }}
+		</ul>
+	</nav>
+</aside>
+{{ end }}
diff --git a/core/installer/welcome/appmanager-tmpl/app.html b/core/installer/welcome/appmanager-tmpl/app.html
index c6874cb..c1387ad 100644
--- a/core/installer/welcome/appmanager-tmpl/app.html
+++ b/core/installer/welcome/appmanager-tmpl/app.html
@@ -1,19 +1,7 @@
-{{ define "task" }}
-{{ range . }}
-<li aria-busy="{{ eq .Status 1 }}">
-	{{ if eq .Status 3 }}<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24"><path fill="black" d="M21 7L9 19l-5.5-5.5l1.41-1.41L9 16.17L19.59 5.59z"/></svg>{{ end }}{{ .Title }}{{ if .Err }} - {{ .Err.Error }} {{ end }}
-	{{ if .Subtasks }}
-	<ul>
-   		{{ template "task" .Subtasks }}
-	</ul>
-	{{ end }}
-</li>
-{{ end }}
-{{ end }}
-
 {{ define "schema-form" }}
   {{ $readonly := .ReadOnly }}
   {{ $networks := .AvailableNetworks }}
+  {{ $clusters := .AvailableClusters }}
   {{ $data := .Data }}
   {{ range $f := .Schema.Fields }}
   {{ $name := $f.Name }}
@@ -38,6 +26,25 @@
           {{ $schema.Name }}
 		  <input type="text" name="{{ $name }}" oninput="valueChanged({{ $name }}, this.value)" {{ if $readonly }}disabled{{ end }} value="{{ index $data $name }}" />
       </label>
+	{{ else if eq $schema.Kind 12 }}
+      <label {{ if $schema.Advanced }}hidden{{ end }}>
+          {{ $schema.Name }}
+		  <details class="dropdown">
+			  {{ $selectedCluster := index $data $name }}
+			  <summary id="{{ $name }}">{{ $selectedCluster }}</summary>
+			  <ul>
+				  {{ range $clusters }}
+					  {{ $selected := eq $selectedCluster .Name }}
+					  <li>
+						  <label>
+							  <input type="radio" name="{{ $name }}" oninput="clusterSelected('{{ $name }}', '{{ .Name }}', this.checked)" {{ if $selected }}checked{{ end }} />
+							  {{ .Name }}
+						  </label>
+					  </li>
+				  {{ end }}
+			  </ul>
+		  </details>
+      </label>
 	{{ else if eq $schema.Kind 3 }}
       <label {{ if $schema.Advanced }}hidden{{ end }}>
           {{ $schema.Name }}
@@ -49,8 +56,8 @@
 					  {{ $selected := eq $selectedNetwork .Name }}
 					  <li>
 						  <label>
-							  <input type="radio" name="{{ $name }}" oninput="networkSelected('{{ $name }}', '{{ .Name }}', this.checked)" {{ if $selected }}checked{{ end }} />
-							  {{ .Name }}
+							  <input type="radio" name="{{ $name }}" oninput="networkSelected('{{ $name }}', '{{ .Name }}', '{{ .Name }} - {{ .Domain }}', this.checked)" {{ if $selected }}checked{{ end }} />
+							  {{ .Name }} - {{ .Domain }}
 						  </label>
 					  </li>
 				  {{ end }}
@@ -75,7 +82,7 @@
 					  <li>
 						  <label>
 							  <input type="checkbox" name="{{ $networkName }}" oninput="multiNetworkSelected('{{ $name }}', '{{ $networkName }}', this.checked)" {{ if $selected }}checked{{ end }} />
-							  {{ .Name }}
+							  {{ .Name }} - {{ .Domain }}
 						  </label>
 					  </li>
 				  {{ end }}
@@ -132,25 +139,14 @@
 {{ define "content"}}
   {{ $schema := .App.Schema }}
   {{ $networks := .AvailableNetworks }}
+  {{ $clusters := .AvailableClusters }}
   {{ $instance := .Instance }}
-  {{ $renderForm := true }}
 
-  {{ if .Task }}
-    {{if or (eq .Task.Status 0) (eq .Task.Status 1) }}
-    {{ $renderForm = false }}
-    Installation in progress (feel free to navigate away from this page):
-    <ul class="progress">
-      {{ template "task" .Task.Subtasks }}
-    </ul>
-    {{ end }}
-  {{ end }}
-
-  {{ if $renderForm }}
   <form id="config-form">
 	  {{ if $instance }}
-		{{ template "schema-form" (dict "Schema" $schema "AvailableNetworks" $networks "ReadOnly" false "Data" ($instance.InputToValues $schema)) }}
+		{{ template "schema-form" (dict "Schema" $schema "AvailableNetworks" $networks "AvailableClusters" $clusters "ReadOnly" false "Data" ($instance.InputToValues $schema)) }}
 	  {{ else }}
-		{{ template "schema-form" (dict "Schema" $schema "AvailableNetworks" $networks "ReadOnly" false "Data" (dict)) }}
+		{{ template "schema-form" (dict "Schema" $schema "AvailableNetworks" $networks "AvailableClusters" $clusters "ReadOnly" false "Data" (dict)) }}
 	  {{ end }}
 	  {{ if $instance }}
 		<div class="grid">
@@ -161,7 +157,6 @@
 		<button type="submit" id="submit">{{ if $instance }}Update{{ else }}Install{{ end }}</button>
 	  {{ end }}
   </form>
-  {{ end }}
 
 <div id="toast-failure" class="toast hidden">
   <svg xmlns="http://www.w3.org/2000/svg" width="36" height="36" viewBox="0 0 24 24"><path fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" d="M12 22c5.523 0 10-4.477 10-10S17.523 2 12 2S2 6.477 2 12s4.477 10 10 10Zm3-6L9 8m0 8l6-8"/></svg> {{ if $instance }}Update failed{{ else}}Install failed{{ end }}
@@ -200,11 +195,19 @@
 	 setValue(name, value, config);
  }
 
- function networkSelected(name, network, selected) {
+ function clusterSelected(name, cluster, selected) {
+	console.log(selected);
+	setValue(name, cluster, config);
+	let summary = document.getElementById(name);
+	summary.innerHTML = cluster;
+	summary.parentNode.removeAttribute("open");
+ }
+
+ function networkSelected(name, network, label, selected) {
 	console.log(selected);
 	setValue(name, network, config);
 	let summary = document.getElementById(name);
-	summary.innerHTML = network;
+	summary.innerHTML = label;
 	summary.parentNode.removeAttribute("open");
  }
 
@@ -310,29 +313,6 @@
 		 }
 	 });
  }
-
- {{ if .Task }}
- async function refresh() {
-	 try {
-		 const resp = await fetch(window.location.href);
-		 if (resp.ok) {
-			 var tmp = document.createElement("html");
-			 tmp.innerHTML = await resp.text();
-			 const progress = tmp.getElementsByClassName("progress")[0];
-			 if (progress) {
-				 document.getElementsByClassName("progress")[0].innerHTML = progress.innerHTML;
-			 } else {
-				 location.reload();
-			 }
-		 }
-	 } catch (error) {
-		 console.log(error);
-	 } finally {
-		 setTimeout(refresh, 3000);
-	 }
- }
- setTimeout(refresh, 3000);
- {{ end }}
 </script>
 
 {{end}}
diff --git a/core/installer/welcome/appmanager-tmpl/base.html b/core/installer/welcome/appmanager-tmpl/base.html
index 4458485..cb69970 100644
--- a/core/installer/welcome/appmanager-tmpl/base.html
+++ b/core/installer/welcome/appmanager-tmpl/base.html
@@ -18,6 +18,8 @@
                   <li><a href="/installed" class="{{ if (eq .CurrentPage "installed") }}primary{{ end }}">Installed</a></li>
                   <li><a href="/not-installed" class="{{ if (eq .CurrentPage "not-installed") }}primary{{ end }}">Not Installed</a></li>
                   <hr>
+                  <li><a href="/clusters" class="{{ if (eq .CurrentPage "clusters") }}primary{{ end }}">Clusters</a></li>
+				  <hr>
                   {{ block "extra_menu" . }}{{ end }}
                 </ul>
             </nav>
@@ -29,3 +31,16 @@
     <script src="/stat/app-manager.js?v=0.0.11"></script>
 	</body>
 </html>
+
+{{ define "task" }}
+{{ range . }}
+<li aria-busy="{{ eq .Status 1 }}">
+	{{ if eq .Status 3 }}<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24"><path fill="black" d="M21 7L9 19l-5.5-5.5l1.41-1.41L9 16.17L19.59 5.59z"/></svg>{{ end }}{{ .Title }}{{ if .Err }} - {{ .Err.Error }} {{ end }}
+	{{ if .Subtasks }}
+	<ul>
+   		{{ template "task" .Subtasks }}
+	</ul>
+	{{ end }}
+</li>
+{{ end }}
+{{ end }}
diff --git a/core/installer/welcome/appmanager-tmpl/cluster.html b/core/installer/welcome/appmanager-tmpl/cluster.html
new file mode 100644
index 0000000..2ef2fbe
--- /dev/null
+++ b/core/installer/welcome/appmanager-tmpl/cluster.html
@@ -0,0 +1,70 @@
+{{ define "header" }}
+<h1>Cluster - {{ .Cluster.Name }}</h1>
+{{ end }}
+
+{{ define "content" }}
+<form action="/clusters/{{ .Cluster.Name }}/remove" method="POST">
+	<button type="submit" name="remove-cluster">remove cluster</button>
+</form>
+<form action="/clusters/{{ .Cluster.Name }}/servers" method="POST" autocomplete="off">
+	<details class="dropdown">
+		<summary id="type">worker</summary>
+		<ul>
+			<li>
+				<label>
+					<input type="radio" name="type" value="worker" checked />
+					worker
+				</label>
+			</li>
+			<li>
+				<label>
+					<input type="radio" name="type" value="controller" />
+					controller
+				</label>
+			</li>
+		</ul>
+	</details>
+	<input type="text" name="ip" placeholder="ip" />
+	<input type="text" name="port" placeholder="22 (optional)" />
+	<input type="text" name="user" placeholder="user" />
+	<input type="password" name="password" placeholder="password" />
+	<button type="submit" name="add-server">add server</button>
+</form>
+{{ $c := .Cluster }}
+<table class="striped">
+	<thead>
+		<tr>
+			<th scope="col">type</th>
+			<th scope="col">hostname</th>
+			<th scope="col">ip</th>
+			<th scope="col">remove</th>
+		</tr>
+	</thead>
+	<tbody>
+		{{ range $s := .Cluster.Controllers }}
+		<tr>
+			<th>controller</th>
+			<th scope="row">{{ $s.Name }}</th>
+			<td>{{ $s.IP }} </td>
+			<td>
+				<form action="/clusters/{{ $c.Name }}/servers/{{ $s.Name }}/remove" method="POST">
+					<button type="submit">remove</button>
+				</form>
+			</td>
+		</tr>
+		{{ end }}
+		{{ range $s := .Cluster.Workers }}
+		<tr>
+			<th>worker</th>
+			<th scope="row">{{ $s.Name }}</th>
+			<td>{{ $s.IP }} </td>
+			<td>
+				<form action="/clusters/{{ $c.Name }}/servers/{{ $s.Name }}/remove" method="POST">
+					<button type="submit">remove</button>
+				</form>
+			</td>
+		</tr>
+		{{ end }}
+	</tbody>
+</table>
+{{ end }}
diff --git a/core/installer/welcome/appmanager-tmpl/task.html b/core/installer/welcome/appmanager-tmpl/task.html
new file mode 100644
index 0000000..699a660
--- /dev/null
+++ b/core/installer/welcome/appmanager-tmpl/task.html
@@ -0,0 +1,30 @@
+{{ define "content"}}
+Installation in progress (feel free to navigate away from this page):
+<ul class="progress">
+    {{ template "task" .Task.Subtasks }}
+</ul>
+
+<script>
+ async function refresh() {
+	 try {
+		 const resp = await fetch(window.location.href);
+		 console.log(window.location.href, resp);
+		 if (resp.ok) {
+			 if (window.location.href != resp.url) {
+				 location.assign(resp.url);
+			 } else {
+				 var tmp = document.createElement("html");
+				 tmp.innerHTML = await resp.text();
+				 const progress = tmp.getElementsByClassName("progress")[0];
+				 document.getElementsByClassName("progress")[0].innerHTML = progress.innerHTML;
+			 }
+		 }
+	 } catch (error) {
+		 console.log(error);
+	 } finally {
+		 setTimeout(refresh, 3000);
+	 }
+ }
+ setTimeout(refresh, 3000);
+</script>
+{{ end }}
diff --git a/core/installer/welcome/appmanager.go b/core/installer/welcome/appmanager.go
index 7f168dc..3016190 100644
--- a/core/installer/welcome/appmanager.go
+++ b/core/installer/welcome/appmanager.go
@@ -4,37 +4,57 @@
 	"context"
 	"embed"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"html/template"
 	"io/ioutil"
 	"log"
+	"net"
 	"net/http"
+	"strconv"
+	"strings"
+	"sync"
 	"time"
 
 	"github.com/Masterminds/sprig/v3"
 	"github.com/gorilla/mux"
 
 	"github.com/giolekva/pcloud/core/installer"
+	"github.com/giolekva/pcloud/core/installer/cluster"
+	"github.com/giolekva/pcloud/core/installer/soft"
 	"github.com/giolekva/pcloud/core/installer/tasks"
 )
 
 //go:embed appmanager-tmpl/*
 var appTmpls embed.FS
 
+type taskForward struct {
+	task       tasks.Task
+	redirectTo string
+}
+
 type AppManagerServer struct {
-	port       int
-	m          *installer.AppManager
-	r          installer.AppRepository
-	reconciler *tasks.FixedReconciler
-	h          installer.HelmReleaseMonitor
-	tasks      map[string]tasks.Task
-	ta         map[string]installer.EnvApp
-	tmpl       tmplts
+	l            sync.Locker
+	port         int
+	repo         soft.RepoIO
+	m            *installer.AppManager
+	r            installer.AppRepository
+	fr           installer.AppRepository
+	reconciler   *tasks.FixedReconciler
+	h            installer.HelmReleaseMonitor
+	cnc          installer.ClusterNetworkConfigurator
+	vpnAPIClient installer.VPNAPIClient
+	tasks        map[string]taskForward
+	ta           map[string]installer.EnvApp
+	tmpl         tmplts
 }
 
 type tmplts struct {
-	index *template.Template
-	app   *template.Template
+	index       *template.Template
+	app         *template.Template
+	allClusters *template.Template
+	cluster     *template.Template
+	task        *template.Template
 }
 
 func parseTemplatesAppManager(fs embed.FS) (tmplts, error) {
@@ -57,29 +77,50 @@
 	if err != nil {
 		return tmplts{}, err
 	}
-	return tmplts{index, app}, nil
+	allClusters, err := parse("appmanager-tmpl/all-clusters.html")
+	if err != nil {
+		return tmplts{}, err
+	}
+	cluster, err := parse("appmanager-tmpl/cluster.html")
+	if err != nil {
+		return tmplts{}, err
+	}
+	task, err := parse("appmanager-tmpl/task.html")
+	if err != nil {
+		return tmplts{}, err
+	}
+	return tmplts{index, app, allClusters, cluster, task}, nil
 }
 
 func NewAppManagerServer(
 	port int,
+	repo soft.RepoIO,
 	m *installer.AppManager,
 	r installer.AppRepository,
+	fr installer.AppRepository,
 	reconciler *tasks.FixedReconciler,
 	h installer.HelmReleaseMonitor,
+	cnc installer.ClusterNetworkConfigurator,
+	vpnAPIClient installer.VPNAPIClient,
 ) (*AppManagerServer, error) {
 	tmpl, err := parseTemplatesAppManager(appTmpls)
 	if err != nil {
 		return nil, err
 	}
 	return &AppManagerServer{
-		port:       port,
-		m:          m,
-		r:          r,
-		reconciler: reconciler,
-		h:          h,
-		tasks:      make(map[string]tasks.Task),
-		ta:         make(map[string]installer.EnvApp),
-		tmpl:       tmpl,
+		l:            &sync.Mutex{},
+		port:         port,
+		repo:         repo,
+		m:            m,
+		r:            r,
+		fr:           fr,
+		reconciler:   reconciler,
+		h:            h,
+		cnc:          cnc,
+		vpnAPIClient: vpnAPIClient,
+		tasks:        make(map[string]taskForward),
+		ta:           make(map[string]installer.EnvApp),
+		tmpl:         tmpl,
 	}, nil
 }
 
@@ -102,8 +143,15 @@
 	r.HandleFunc("/api/instance/{slug}", s.handleInstance).Methods(http.MethodGet)
 	r.HandleFunc("/api/instance/{slug}/update", s.handleAppUpdate).Methods(http.MethodPost)
 	r.HandleFunc("/api/instance/{slug}/remove", s.handleAppRemove).Methods(http.MethodPost)
+	r.HandleFunc("/clusters/{cluster}/servers/{server}/remove", s.handleClusterRemoveServer).Methods(http.MethodPost)
+	r.HandleFunc("/clusters/{cluster}/servers", s.handleClusterAddServer).Methods(http.MethodPost)
+	r.HandleFunc("/clusters/{name}", s.handleCluster).Methods(http.MethodGet)
+	r.HandleFunc("/clusters/{name}/remove", s.handleRemoveCluster).Methods(http.MethodPost)
+	r.HandleFunc("/clusters", s.handleAllClusters).Methods(http.MethodGet)
+	r.HandleFunc("/clusters", s.handleCreateCluster).Methods(http.MethodPost)
 	r.HandleFunc("/app/{slug}", s.handleAppUI).Methods(http.MethodGet)
 	r.HandleFunc("/instance/{slug}", s.handleInstanceUI).Methods(http.MethodGet)
+	r.HandleFunc("/tasks/{slug}", s.handleTaskStatus).Methods(http.MethodGet)
 	r.HandleFunc("/{pageType}", s.handleAppsList).Methods(http.MethodGet)
 	r.HandleFunc("/", s.handleAppsList).Methods(http.MethodGet)
 	fmt.Printf("Starting HTTP server on port: %d\n", s.port)
@@ -201,6 +249,8 @@
 }
 
 func (s *AppManagerServer) handleAppInstall(w http.ResponseWriter, r *http.Request) {
+	s.l.Lock()
+	defer s.l.Unlock()
 	slug, ok := mux.Vars(r)["slug"]
 	if !ok {
 		http.Error(w, "empty slug", http.StatusBadRequest)
@@ -249,20 +299,27 @@
 	if _, ok := s.tasks[instanceId]; ok {
 		panic("MUST NOT REACH!")
 	}
-	s.tasks[instanceId] = t
+	s.tasks[instanceId] = taskForward{t, fmt.Sprintf("/instance/%s", instanceId)}
 	s.ta[instanceId] = a
 	t.OnDone(func(err error) {
-		delete(s.tasks, instanceId)
-		delete(s.ta, instanceId)
+		go func() {
+			time.Sleep(30 * time.Second)
+			s.l.Lock()
+			defer s.l.Unlock()
+			delete(s.tasks, instanceId)
+			delete(s.ta, instanceId)
+		}()
 	})
 	go t.Start()
-	if _, err := fmt.Fprintf(w, "/instance/%s", instanceId); err != nil {
+	if _, err := fmt.Fprintf(w, "/tasks/%s", instanceId); err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 }
 
 func (s *AppManagerServer) handleAppUpdate(w http.ResponseWriter, r *http.Request) {
+	s.l.Lock()
+	defer s.l.Unlock()
 	slug, ok := mux.Vars(r)["slug"]
 	if !ok {
 		http.Error(w, "empty slug", http.StatusBadRequest)
@@ -291,11 +348,16 @@
 	go s.reconciler.Reconcile(ctx)
 	t := tasks.NewMonitorRelease(s.h, rr)
 	t.OnDone(func(err error) {
-		delete(s.tasks, slug)
+		go func() {
+			time.Sleep(30 * time.Second)
+			s.l.Lock()
+			defer s.l.Unlock()
+			delete(s.tasks, slug)
+		}()
 	})
-	s.tasks[slug] = t
+	s.tasks[slug] = taskForward{t, fmt.Sprintf("/instance/%s", slug)}
 	go t.Start()
-	if _, err := fmt.Fprintf(w, "/instance/%s", slug); err != nil {
+	if _, err := fmt.Fprintf(w, "/tasks/%s", slug); err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
@@ -373,6 +435,7 @@
 	Instance          *installer.AppInstanceConfig
 	Instances         []installer.AppInstanceConfig
 	AvailableNetworks []installer.Network
+	AvailableClusters []cluster.State
 	Task              tasks.Task
 	CurrentPage       string
 }
@@ -403,10 +466,16 @@
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
+	clusters, err := s.m.GetClusters()
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
 	data := appPageData{
 		App:               a,
 		Instances:         instances,
 		AvailableNetworks: networks,
+		AvailableClusters: clusters,
 		CurrentPage:       a.Name(),
 	}
 	if err := s.tmpl.app.Execute(w, data); err != nil {
@@ -416,6 +485,8 @@
 }
 
 func (s *AppManagerServer) handleInstanceUI(w http.ResponseWriter, r *http.Request) {
+	s.l.Lock()
+	defer s.l.Unlock()
 	global, err := s.m.Config()
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -432,6 +503,10 @@
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
+	if ok && !(t.task.Status() == tasks.StatusDone || t.task.Status() == tasks.StatusFailed) {
+		http.Redirect(w, r, fmt.Sprintf("/tasks/%s", slug), http.StatusSeeOther)
+		return
+	}
 	var a installer.EnvApp
 	if instance != nil {
 		a, err = s.m.GetInstanceApp(instance.Id)
@@ -456,12 +531,18 @@
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
+	clusters, err := s.m.GetClusters()
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
 	data := appPageData{
 		App:               a,
 		Instance:          instance,
 		Instances:         instances,
 		AvailableNetworks: networks,
-		Task:              t,
+		AvailableClusters: clusters,
+		Task:              t.task,
 		CurrentPage:       slug,
 	}
 	if err := s.tmpl.app.Execute(w, data); err != nil {
@@ -469,3 +550,319 @@
 		return
 	}
 }
+
+type taskStatusData struct {
+	CurrentPage string
+	Task        tasks.Task
+}
+
+func (s *AppManagerServer) handleTaskStatus(w http.ResponseWriter, r *http.Request) {
+	s.l.Lock()
+	defer s.l.Unlock()
+	slug, ok := mux.Vars(r)["slug"]
+	if !ok {
+		http.Error(w, "empty slug", http.StatusBadRequest)
+		return
+	}
+	t, ok := s.tasks[slug]
+	if !ok {
+		http.Error(w, "task not found", http.StatusInternalServerError)
+
+		return
+	}
+	if ok && (t.task.Status() == tasks.StatusDone || t.task.Status() == tasks.StatusFailed) {
+		http.Redirect(w, r, t.redirectTo, http.StatusSeeOther)
+		return
+	}
+	data := taskStatusData{
+		CurrentPage: "",
+		Task:        t.task,
+	}
+	if err := s.tmpl.task.Execute(w, data); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+type clustersData struct {
+	CurrentPage string
+	Clusters    []cluster.State
+}
+
+func (s *AppManagerServer) handleAllClusters(w http.ResponseWriter, r *http.Request) {
+	clusters, err := s.m.GetClusters()
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	data := clustersData{
+		"clusters",
+		clusters,
+	}
+	if err := s.tmpl.allClusters.Execute(w, data); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+type clusterData struct {
+	CurrentPage string
+	Cluster     cluster.State
+}
+
+func (s *AppManagerServer) handleCluster(w http.ResponseWriter, r *http.Request) {
+	name, ok := mux.Vars(r)["name"]
+	if !ok {
+		http.Error(w, "empty name", http.StatusBadRequest)
+		return
+	}
+	m, err := s.getClusterManager(name)
+	if err != nil {
+		if errors.Is(err, installer.ErrorNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+		} else {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+		}
+		return
+	}
+	data := clusterData{
+		"clusters",
+		m.State(),
+	}
+	if err := s.tmpl.cluster.Execute(w, data); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (s *AppManagerServer) handleClusterRemoveServer(w http.ResponseWriter, r *http.Request) {
+	s.l.Lock()
+	defer s.l.Unlock()
+	cName, ok := mux.Vars(r)["cluster"]
+	if !ok {
+		http.Error(w, "empty name", http.StatusBadRequest)
+		return
+	}
+	if _, ok := s.tasks[cName]; ok {
+		http.Error(w, "cluster task in progress", http.StatusLocked)
+		return
+	}
+	sName, ok := mux.Vars(r)["server"]
+	if !ok {
+		http.Error(w, "empty name", http.StatusBadRequest)
+		return
+	}
+	m, err := s.getClusterManager(cName)
+	if err != nil {
+		if errors.Is(err, installer.ErrorNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+		} else {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+		}
+		return
+	}
+	task := tasks.NewClusterRemoveServerTask(m, sName, s.repo)
+	task.OnDone(func(err error) {
+		go func() {
+			time.Sleep(30 * time.Second)
+			s.l.Lock()
+			defer s.l.Unlock()
+			delete(s.tasks, cName)
+		}()
+	})
+	go task.Start()
+	s.tasks[cName] = taskForward{task, fmt.Sprintf("/clusters/%s", cName)}
+	http.Redirect(w, r, fmt.Sprintf("/tasks/%s", cName), http.StatusSeeOther)
+}
+
+func (s *AppManagerServer) getClusterManager(cName string) (cluster.Manager, error) {
+	clusters, err := s.m.GetClusters()
+	if err != nil {
+		return nil, err
+	}
+	var c *cluster.State
+	for _, i := range clusters {
+		if i.Name == cName {
+			c = &i
+			break
+		}
+	}
+	if c == nil {
+		return nil, installer.ErrorNotFound
+	}
+	return cluster.RestoreKubeManager(*c)
+}
+
+func (s *AppManagerServer) handleClusterAddServer(w http.ResponseWriter, r *http.Request) {
+	s.l.Lock()
+	defer s.l.Unlock()
+	cName, ok := mux.Vars(r)["cluster"]
+	if !ok {
+		http.Error(w, "empty name", http.StatusBadRequest)
+		return
+	}
+	if _, ok := s.tasks[cName]; ok {
+		http.Error(w, "cluster task in progress", http.StatusLocked)
+		return
+	}
+	m, err := s.getClusterManager(cName)
+	if err != nil {
+		if errors.Is(err, installer.ErrorNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+		} else {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+		}
+		return
+	}
+	t := r.PostFormValue("type")
+	ip := net.ParseIP(r.PostFormValue("ip"))
+	if ip == nil {
+		http.Error(w, "invalid ip", http.StatusBadRequest)
+		return
+	}
+	port := 22
+	if p := r.PostFormValue("port"); p != "" {
+		port, err = strconv.Atoi(p)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+	}
+	server := cluster.Server{
+		IP:       ip,
+		Port:     port,
+		User:     r.PostFormValue("user"),
+		Password: r.PostFormValue("password"),
+	}
+	var task tasks.Task
+	switch strings.ToLower(t) {
+	case "controller":
+		if len(m.State().Controllers) == 0 {
+			task = tasks.NewClusterInitTask(m, server, s.cnc, s.repo, s.setupRemoteCluster())
+		} else {
+			task = tasks.NewClusterJoinControllerTask(m, server, s.repo)
+		}
+	case "worker":
+		task = tasks.NewClusterJoinWorkerTask(m, server, s.repo)
+	default:
+		http.Error(w, "invalid type", http.StatusBadRequest)
+		return
+	}
+	task.OnDone(func(err error) {
+		go func() {
+			time.Sleep(30 * time.Second)
+			s.l.Lock()
+			defer s.l.Unlock()
+			delete(s.tasks, cName)
+		}()
+	})
+	go task.Start()
+	s.tasks[cName] = taskForward{task, fmt.Sprintf("/clusters/%s", cName)}
+	http.Redirect(w, r, fmt.Sprintf("/tasks/%s", cName), http.StatusSeeOther)
+}
+
+func (s *AppManagerServer) handleCreateCluster(w http.ResponseWriter, r *http.Request) {
+	cName := r.PostFormValue("name")
+	if cName == "" {
+		http.Error(w, "no name", http.StatusBadRequest)
+		return
+	}
+	st := cluster.State{Name: cName}
+	if _, err := s.repo.Do(func(fs soft.RepoFS) (string, error) {
+		if err := soft.WriteJson(fs, fmt.Sprintf("/clusters/%s/config.json", cName), st); err != nil {
+			return "", err
+		}
+		return fmt.Sprintf("create cluster: %s", cName), nil
+	}); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	http.Redirect(w, r, fmt.Sprintf("/clusters/%s", cName), http.StatusSeeOther)
+}
+
+func (s *AppManagerServer) handleRemoveCluster(w http.ResponseWriter, r *http.Request) {
+	cName, ok := mux.Vars(r)["name"]
+	if !ok {
+		http.Error(w, "empty name", http.StatusBadRequest)
+		return
+	}
+	if _, ok := s.tasks[cName]; ok {
+		http.Error(w, "cluster task in progress", http.StatusLocked)
+		return
+	}
+	m, err := s.getClusterManager(cName)
+	if err != nil {
+		if errors.Is(err, installer.ErrorNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+		} else {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+		}
+		return
+	}
+	task := tasks.NewRemoveClusterTask(m, s.cnc, s.repo)
+	task.OnDone(func(err error) {
+		go func() {
+			time.Sleep(30 * time.Second)
+			s.l.Lock()
+			defer s.l.Unlock()
+			delete(s.tasks, cName)
+		}()
+	})
+	go task.Start()
+	s.tasks[cName] = taskForward{task, fmt.Sprintf("/clusters/%s", cName)}
+	http.Redirect(w, r, fmt.Sprintf("/tasks/%s", cName), http.StatusSeeOther)
+}
+
+func (s *AppManagerServer) setupRemoteCluster() cluster.ClusterSetupFunc {
+	const vpnUser = "private-network-proxy"
+	return func(name, kubeconfig, ingressClassName string) (net.IP, error) {
+		hostname := fmt.Sprintf("cluster-%s", name)
+		t := tasks.NewInstallTask(s.h, func() (installer.ReleaseResources, error) {
+			app, err := installer.FindEnvApp(s.fr, "cluster-network")
+			if err != nil {
+				return installer.ReleaseResources{}, err
+			}
+			env, err := s.m.Config()
+			if err != nil {
+				return installer.ReleaseResources{}, err
+			}
+			instanceId := fmt.Sprintf("%s-%s", app.Slug(), name)
+			appDir := fmt.Sprintf("/clusters/%s/ingress", name)
+			namespace := fmt.Sprintf("%scluster-network-%s", env.NamespacePrefix, name)
+			rr, err := s.m.Install(app, instanceId, appDir, namespace, map[string]any{
+				"cluster": map[string]any{
+					"name":             name,
+					"kubeconfig":       kubeconfig,
+					"ingressClassName": ingressClassName,
+				},
+				// TODO(gio): remove hardcoded user
+				"vpnUser":          vpnUser,
+				"vpnProxyHostname": hostname,
+			})
+			if err != nil {
+				return installer.ReleaseResources{}, err
+			}
+			ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
+			go s.reconciler.Reconcile(ctx)
+			return rr, err
+		})
+		ch := make(chan error)
+		t.OnDone(func(err error) {
+			ch <- err
+		})
+		go t.Start()
+		err := <-ch
+		if err != nil {
+			return nil, err
+		}
+		for {
+			ip, err := s.vpnAPIClient.GetNodeIP(vpnUser, hostname)
+			if err == nil {
+				return ip, nil
+			}
+			if errors.Is(err, installer.ErrorNotFound) {
+				time.Sleep(5 * time.Second)
+			}
+		}
+	}
+}
diff --git a/core/installer/welcome/dodo_app.go b/core/installer/welcome/dodo_app.go
index 7f3d383..9a93afd 100644
--- a/core/installer/welcome/dodo_app.go
+++ b/core/installer/welcome/dodo_app.go
@@ -104,6 +104,7 @@
 	nsc               installer.NamespaceCreator
 	jc                installer.JobCreator
 	vpnKeyGen         installer.VPNAPIClient
+	cnc               installer.ClusterNetworkConfigurator
 	workers           map[string]map[string]struct{}
 	appConfigs        map[string]appConfig
 	tmplts            dodoAppTmplts
@@ -136,6 +137,7 @@
 	nsc installer.NamespaceCreator,
 	jc installer.JobCreator,
 	vpnKeyGen installer.VPNAPIClient,
+	cnc installer.ClusterNetworkConfigurator,
 	env installer.EnvConfig,
 	external bool,
 	fetchUsersAddr string,
@@ -171,6 +173,7 @@
 		nsc,
 		jc,
 		vpnKeyGen,
+		cnc,
 		map[string]map[string]struct{}{},
 		map[string]appConfig{},
 		tmplts,
@@ -1073,7 +1076,7 @@
 		return err
 	}
 	hf := installer.NewGitHelmFetcher()
-	m, err := installer.NewAppManager(configRepo, s.nsc, s.jc, hf, s.vpnKeyGen, "/")
+	m, err := installer.NewAppManager(configRepo, s.nsc, s.jc, hf, s.vpnKeyGen, s.cnc, "/")
 	if err != nil {
 		return err
 	}
@@ -1220,7 +1223,7 @@
 		return installer.ReleaseResources{}, err
 	}
 	hf := installer.NewGitHelmFetcher()
-	m, err := installer.NewAppManager(repo, s.nsc, s.jc, hf, s.vpnKeyGen, "/.dodo")
+	m, err := installer.NewAppManager(repo, s.nsc, s.jc, hf, s.vpnKeyGen, s.cnc, "/.dodo")
 	if err != nil {
 		return installer.ReleaseResources{}, err
 	}
diff --git a/core/installer/welcome/env_test.go b/core/installer/welcome/env_test.go
index ee80e4e..439fdf2 100644
--- a/core/installer/welcome/env_test.go
+++ b/core/installer/welcome/env_test.go
@@ -3,6 +3,7 @@
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/fs"
 	"log"
@@ -256,7 +257,13 @@
 	if err := m.m.Add(name, task); err != nil {
 		return err
 	} else {
-		task.OnDone(m.onDone)
+		task.OnDone(func(err error) {
+			if err == nil {
+				m.onDone(nil)
+			} else {
+				m.onDone(fmt.Errorf("%s: %s", name, err))
+			}
+		})
 		return nil
 	}
 }