AppManager: Remove delay between initiating same kind of task twice
Change-Id: I539f3cb357e00ab560eaff34bef8ae2595e11b44
diff --git a/core/installer/welcome/appmanager-tmpl/app.html b/core/installer/welcome/appmanager-tmpl/app.html
index 2629ccf..c4eea06 100644
--- a/core/installer/welcome/appmanager-tmpl/app.html
+++ b/core/installer/welcome/appmanager-tmpl/app.html
@@ -131,9 +131,6 @@
{{ define "extra_menu" }}
<li><a href="/app/{{ .App.Slug }}" {{ if eq $.CurrentPage .App.Name }}class="primary"{{ end }}>{{ .App.Name }}</a></li>
- {{ if (and (not $.Instance) $.Task) }}
- <li><a href="/instance/{{ $.CurrentPage }}" class="primary">{{ $.CurrentPage }}</a></li>
- {{ end }}
{{ range .Instances }}
<li><a href="/instance/{{ .Id }}" {{ if eq $.CurrentPage .Id }}class="primary"{{ end }}>{{ .Id }}</a></li>
{{ end }}
diff --git a/core/installer/welcome/appmanager.go b/core/installer/welcome/appmanager.go
index 190ca12..a5050ef 100644
--- a/core/installer/welcome/appmanager.go
+++ b/core/installer/welcome/appmanager.go
@@ -7,8 +7,6 @@
"errors"
"fmt"
"html/template"
- "io/ioutil"
- "log"
"net"
"net/http"
"strconv"
@@ -31,6 +29,7 @@
type taskForward struct {
task tasks.Task
redirectTo string
+ id int
}
type AppManagerServer struct {
@@ -44,8 +43,7 @@
h installer.HelmReleaseMonitor
cnc installer.ClusterNetworkConfigurator
vpnAPIClient installer.VPNAPIClient
- tasks map[string]taskForward
- ta map[string]installer.EnvApp
+ tasks map[string]*taskForward
tmpl tmplts
}
@@ -118,8 +116,7 @@
h: h,
cnc: cnc,
vpnAPIClient: vpnAPIClient,
- tasks: make(map[string]taskForward),
- ta: make(map[string]installer.EnvApp),
+ tasks: make(map[string]*taskForward),
tmpl: tmpl,
}, nil
}
@@ -301,29 +298,21 @@
http.Error(w, "empty slug", http.StatusBadRequest)
return
}
- contents, err := ioutil.ReadAll(r.Body)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
var values map[string]any
- if err := json.Unmarshal(contents, &values); err != nil {
+ if err := json.NewDecoder(r.Body).Decode(&values); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- log.Printf("Values: %+v\n", values)
a, err := installer.FindEnvApp(s.r, slug)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- log.Printf("Found application: %s\n", slug)
env, err := s.m.Config()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- log.Printf("Configuration: %+v\n", env)
suffixGen := installer.NewFixedLengthRandomSuffixGenerator(3)
suffix, err := suffixGen.Generate()
if err != nil {
@@ -344,17 +333,8 @@
if _, ok := s.tasks[instanceId]; ok {
panic("MUST NOT REACH!")
}
- s.tasks[instanceId] = taskForward{t, fmt.Sprintf("/instance/%s", instanceId)}
- s.ta[instanceId] = a
- t.OnDone(func(err error) {
- go func() {
- time.Sleep(30 * time.Second)
- s.l.Lock()
- defer s.l.Unlock()
- delete(s.tasks, instanceId)
- delete(s.ta, instanceId)
- }()
- })
+ s.tasks[instanceId] = &taskForward{t, fmt.Sprintf("/instance/%s", instanceId), 0}
+ t.OnDone(s.cleanTask(instanceId, 0))
go t.Start()
if _, err := fmt.Fprintf(w, "/tasks/%s", instanceId); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -370,19 +350,18 @@
http.Error(w, "empty slug", http.StatusBadRequest)
return
}
- contents, err := ioutil.ReadAll(r.Body)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
var values map[string]any
- if err := json.Unmarshal(contents, &values); err != nil {
+ if err := json.NewDecoder(r.Body).Decode(&values); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- if _, ok := s.tasks[slug]; ok {
- http.Error(w, "Update already in progress", http.StatusBadRequest)
- return
+ tid := 0
+ if t, ok := s.tasks[slug]; ok {
+ if t.task != nil {
+ http.Error(w, "Update already in progress", http.StatusBadRequest)
+ return
+ }
+ tid = t.id + 1
}
rr, err := s.m.Update(slug, values)
if err != nil {
@@ -392,15 +371,8 @@
ctx, _ := context.WithTimeout(context.Background(), 2*time.Minute)
go s.reconciler.Reconcile(ctx)
t := tasks.NewMonitorRelease(s.h, rr)
- t.OnDone(func(err error) {
- go func() {
- time.Sleep(30 * time.Second)
- s.l.Lock()
- defer s.l.Unlock()
- delete(s.tasks, slug)
- }()
- })
- s.tasks[slug] = taskForward{t, fmt.Sprintf("/instance/%s", slug)}
+ t.OnDone(s.cleanTask(slug, tid))
+ s.tasks[slug] = &taskForward{t, fmt.Sprintf("/instance/%s", slug), tid}
go t.Start()
if _, err := fmt.Fprintf(w, "/tasks/%s", slug); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -481,7 +453,6 @@
Instances []installer.AppInstanceConfig
AvailableNetworks []installer.Network
AvailableClusters []cluster.State
- Task tasks.Task
CurrentPage string
}
@@ -542,29 +513,19 @@
http.Error(w, "empty slug", http.StatusBadRequest)
return
}
- t, ok := s.tasks[slug]
- instance, err := s.m.GetInstance(slug)
- if err != nil && !ok {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- if ok && !(t.task.Status() == tasks.StatusDone || t.task.Status() == tasks.StatusFailed) {
+ if t, ok := s.tasks[slug]; ok && t.task != nil {
http.Redirect(w, r, fmt.Sprintf("/tasks/%s", slug), http.StatusSeeOther)
return
}
- var a installer.EnvApp
- if instance != nil {
- a, err = s.m.GetInstanceApp(instance.Id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- } else {
- var ok bool
- a, ok = s.ta[slug]
- if !ok {
- panic("MUST NOT REACH!")
- }
+ instance, err := s.m.GetInstance(slug)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ a, err := s.m.GetInstanceApp(instance.Id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
}
instances, err := s.m.GetAllAppInstances(a.Slug())
if err != nil {
@@ -587,7 +548,6 @@
Instances: instances,
AvailableNetworks: networks,
AvailableClusters: clusters,
- Task: t.task,
CurrentPage: slug,
}
if err := s.tmpl.app.Execute(w, data); err != nil {
@@ -615,7 +575,7 @@
return
}
- if ok && (t.task.Status() == tasks.StatusDone || t.task.Status() == tasks.StatusFailed) {
+ if ok && t.task == nil {
http.Redirect(w, r, t.redirectTo, http.StatusSeeOther)
return
}
@@ -686,9 +646,13 @@
http.Error(w, "empty name", http.StatusBadRequest)
return
}
- if _, ok := s.tasks[cName]; ok {
- http.Error(w, "cluster task in progress", http.StatusLocked)
- return
+ tid := 0
+ if t, ok := s.tasks[cName]; ok {
+ if t.task != nil {
+ http.Error(w, "cluster task in progress", http.StatusLocked)
+ return
+ }
+ tid = t.id + 1
}
m, err := s.getClusterManager(cName)
if err != nil {
@@ -700,16 +664,9 @@
return
}
task := tasks.NewClusterSetupTask(m, s.setupRemoteClusterStorage(), s.repo, fmt.Sprintf("cluster %s: setting up storage", m.State().Name))
- task.OnDone(func(err error) {
- go func() {
- time.Sleep(30 * time.Second)
- s.l.Lock()
- defer s.l.Unlock()
- delete(s.tasks, cName)
- }()
- })
+ task.OnDone(s.cleanTask(cName, tid))
go task.Start()
- s.tasks[cName] = taskForward{task, fmt.Sprintf("/clusters/%s", cName)}
+ s.tasks[cName] = &taskForward{task, fmt.Sprintf("/clusters/%s", cName), tid}
http.Redirect(w, r, fmt.Sprintf("/tasks/%s", cName), http.StatusSeeOther)
}
@@ -721,9 +678,13 @@
http.Error(w, "empty name", http.StatusBadRequest)
return
}
- if _, ok := s.tasks[cName]; ok {
- http.Error(w, "cluster task in progress", http.StatusLocked)
- return
+ tid := 0
+ if t, ok := s.tasks[cName]; ok {
+ if t.task != nil {
+ http.Error(w, "cluster task in progress", http.StatusLocked)
+ return
+ }
+ tid = t.id + 1
}
sName, ok := mux.Vars(r)["server"]
if !ok {
@@ -740,16 +701,9 @@
return
}
task := tasks.NewClusterRemoveServerTask(m, sName, s.repo)
- task.OnDone(func(err error) {
- go func() {
- time.Sleep(30 * time.Second)
- s.l.Lock()
- defer s.l.Unlock()
- delete(s.tasks, cName)
- }()
- })
+ task.OnDone(s.cleanTask(cName, tid))
go task.Start()
- s.tasks[cName] = taskForward{task, fmt.Sprintf("/clusters/%s", cName)}
+ s.tasks[cName] = &taskForward{task, fmt.Sprintf("/clusters/%s", cName), tid}
http.Redirect(w, r, fmt.Sprintf("/tasks/%s", cName), http.StatusSeeOther)
}
@@ -779,9 +733,13 @@
http.Error(w, "empty name", http.StatusBadRequest)
return
}
- if _, ok := s.tasks[cName]; ok {
- http.Error(w, "cluster task in progress", http.StatusLocked)
- return
+ tid := 0
+ if t, ok := s.tasks[cName]; ok {
+ if t.task != nil {
+ http.Error(w, "cluster task in progress", http.StatusLocked)
+ return
+ }
+ tid = t.id + 1
}
m, err := s.getClusterManager(cName)
if err != nil {
@@ -826,16 +784,9 @@
http.Error(w, "invalid type", http.StatusBadRequest)
return
}
- task.OnDone(func(err error) {
- go func() {
- time.Sleep(30 * time.Second)
- s.l.Lock()
- defer s.l.Unlock()
- delete(s.tasks, cName)
- }()
- })
+ task.OnDone(s.cleanTask(cName, tid))
go task.Start()
- s.tasks[cName] = taskForward{task, fmt.Sprintf("/clusters/%s", cName)}
+ s.tasks[cName] = &taskForward{task, fmt.Sprintf("/clusters/%s", cName), tid}
http.Redirect(w, r, fmt.Sprintf("/tasks/%s", cName), http.StatusSeeOther)
}
@@ -864,9 +815,13 @@
http.Error(w, "empty name", http.StatusBadRequest)
return
}
- if _, ok := s.tasks[cName]; ok {
- http.Error(w, "cluster task in progress", http.StatusLocked)
- return
+ tid := 0
+ if t, ok := s.tasks[cName]; ok {
+ if t.task != nil {
+ http.Error(w, "cluster task in progress", http.StatusLocked)
+ return
+ }
+ tid = t.id + 1
}
m, err := s.getClusterManager(cName)
if err != nil {
@@ -878,16 +833,9 @@
return
}
task := tasks.NewRemoveClusterTask(m, s.cnc, s.repo)
- task.OnDone(func(err error) {
- go func() {
- time.Sleep(30 * time.Second)
- s.l.Lock()
- defer s.l.Unlock()
- delete(s.tasks, cName)
- }()
- })
+ task.OnDone(s.cleanTask(cName, tid))
go task.Start()
- s.tasks[cName] = taskForward{task, fmt.Sprintf("/clusters/%s", cName)}
+ s.tasks[cName] = &taskForward{task, fmt.Sprintf("/clusters/%s", cName), tid}
http.Redirect(w, r, fmt.Sprintf("/tasks/%s", cName), http.StatusSeeOther)
}
@@ -983,3 +931,22 @@
return nil
}
}
+
+func (s *AppManagerServer) cleanTask(name string, id int) func(error) {
+ return func(err error) {
+ if err != nil {
+ fmt.Printf("Task %s failed: %s\n", name, err.Error())
+ }
+ s.l.Lock()
+ defer s.l.Unlock()
+ s.tasks[name].task = nil
+ go func() {
+ time.Sleep(30 * time.Second)
+ s.l.Lock()
+ defer s.l.Unlock()
+ if t, ok := s.tasks[name]; ok && t.id == id {
+ delete(s.tasks, name)
+ }
+ }()
+ }
+}