AppManager: App installation status monitoring
Change-Id: I64f4ae0d27892b74f8827a275907cb75da09a758
diff --git a/core/installer/app_configs/app_base.cue b/core/installer/app_configs/app_base.cue
index 1bfa41e..2b3ddd2 100644
--- a/core/installer/app_configs/app_base.cue
+++ b/core/installer/app_configs/app_base.cue
@@ -1,5 +1,7 @@
import (
+ "crypto/sha256"
"encoding/base64"
+ "encoding/json"
"list"
"net"
"strings"
@@ -637,6 +639,7 @@
}
#HelmRelease: {
+ _id: string
_name: string
_chart: _
_values: _
@@ -653,6 +656,7 @@
name: _name
namespace: _namespace
annotations: _annotations & {
+ "dodo.cloud/id": _id
"dodo.cloud/installer-info": _info
}
}
@@ -703,6 +707,12 @@
helm: {
for name, r in out.helmR {
"\(name)": #HelmRelease & {
+ if r.id != _|_ {
+ _id: r.id
+ }
+ if r.id == _|_ {
+ _id: out.id
+ }
_name: name
_chart: _lc[r.chart.name]
_values: r.values
@@ -738,6 +748,15 @@
url: string | *""
#WithOut: {
+ id: base64.Encode(null, sha256.Sum256(json.Marshal([
+ images,
+ charts,
+ helm,
+ openPort,
+ clusterProxy,
+ ])))
+ _id: id
+
cluster?: #Cluster
if input.cluster != _|_ {
cluster: #Cluster | *input.cluster
@@ -847,6 +866,7 @@
helmR: {
for k, v in helm {
"\(k)": v & {
+ id: _id
if v.cluster == _|_ && _cluster != _|_ {
cluster: _cluster
}
diff --git a/core/installer/app_configs/dodo_app.cue b/core/installer/app_configs/dodo_app.cue
index 7e54a08..9f41805 100644
--- a/core/installer/app_configs/dodo_app.cue
+++ b/core/installer/app_configs/dodo_app.cue
@@ -154,11 +154,11 @@
"DODO_PORT_\(strings.ToUpper(p.name))=\(p.value)"
},
],
- ])
+ ])
_envMap: [
- for e in _allEnv { strings.SplitN(e, "=", 2) }
-]
+ for e in _allEnv {strings.SplitN(e, "=", 2)},
+ ]
lastCmdEnv: list.Concat([
_allEnv,
@@ -559,8 +559,8 @@
"\(svc.name)": {
chart: charts.app
annotations: {
- "dodo.cloud/resource-type": "service"
- "dodo.cloud/resource.service.name": svc.name
+ "dodo.cloud/resource-type": "service"
+ "dodo.cloud/resource.service.name": svc.name
}
values: {
image: {
diff --git a/core/installer/app_manager.go b/core/installer/app_manager.go
index e6133c1..7995a41 100644
--- a/core/installer/app_manager.go
+++ b/core/installer/app_manager.go
@@ -81,6 +81,11 @@
}
}
+func (m *AppManager) AppRendered(instanceId string) ([]byte, error) {
+ path := filepath.Join(m.appDirRoot, instanceId, "rendered.json")
+ return soft.ReadFile(m.repo, path)
+}
+
func (m *AppManager) GetAllInstances() ([]AppInstanceConfig, error) {
m.repo.Pull()
kust, err := soft.ReadKustomization(m.repo, filepath.Join(m.appDirRoot, kustomizationFileName))
@@ -317,6 +322,7 @@
}
type Resource struct {
+ Id string `json:"id"`
Name string `json:"name"`
Namespace string `json:"namespace"`
Info string `json:"info"`
@@ -599,7 +605,7 @@
Name: h.Metadata.Name,
Namespace: h.Metadata.Namespace,
Info: fmt.Sprintf("%s/%s", h.Metadata.Namespace, h.Metadata.Name),
- Annotations: nil,
+ Annotations: h.Metadata.Annotations,
}
if h.Metadata.Annotations != nil {
res.Annotations = h.Metadata.Annotations
@@ -607,6 +613,10 @@
if ok && len(info) != 0 {
res.Info = info
}
+ id, ok := h.Metadata.Annotations["dodo.cloud/id"]
+ if ok && len(id) != 0 {
+ res.Id = id
+ }
}
ret = append(ret, res)
}
diff --git a/core/installer/cmd/app_manager.go b/core/installer/cmd/app_manager.go
index c2547d6..2f9aef7 100644
--- a/core/installer/cmd/app_manager.go
+++ b/core/installer/cmd/app_manager.go
@@ -138,6 +138,10 @@
if err != nil {
return err
}
+ im, err := newInstanceMonitor()
+ if err != nil {
+ return err
+ }
s, err := appmanager.NewServer(
appManagerFlags.port,
ssClient,
@@ -147,6 +151,7 @@
fr,
tasks.NewFixedReconciler(env.Id, env.Id),
helmMon,
+ im,
cnc,
vpnAPIClient,
)
diff --git a/core/installer/cmd/kube.go b/core/installer/cmd/kube.go
index 1a74731..b83a5a0 100644
--- a/core/installer/cmd/kube.go
+++ b/core/installer/cmd/kube.go
@@ -3,6 +3,7 @@
import (
"github.com/giolekva/pcloud/core/installer"
"github.com/giolekva/pcloud/core/installer/kube"
+ "github.com/giolekva/pcloud/core/installer/status"
)
func newNSCreator() (installer.NamespaceCreator, error) {
@@ -15,8 +16,16 @@
return installer.NewZoneStatusFetcher(rootFlags.kubeConfig)
}
-func newHelmReleaseMonitor() (installer.HelmReleaseMonitor, error) {
- return installer.NewHelmReleaseMonitor(rootFlags.kubeConfig)
+func newHelmReleaseMonitor() (status.ResourceMonitor, error) {
+ return status.NewHelmReleaseMonitor(rootFlags.kubeConfig)
+}
+
+func newInstanceMonitor() (*status.InstanceMonitor, error) {
+ m, err := status.NewDelegatingMonitor(rootFlags.kubeConfig)
+ if err != nil {
+ return nil, err
+ }
+ return status.NewInstanceMonitor(m), nil
}
func newJobCreator() (installer.JobCreator, error) {
diff --git a/core/installer/kube.go b/core/installer/kube.go
index 7f861af..8328817 100644
--- a/core/installer/kube.go
+++ b/core/installer/kube.go
@@ -3,7 +3,6 @@
import (
"bytes"
"context"
- "encoding/json"
"fmt"
"io"
"net/http"
@@ -13,8 +12,6 @@
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
@@ -88,42 +85,3 @@
func NewZoneStatusFetcher(kubeconfig string) (ZoneStatusFetcher, error) {
return &realZoneStatusFetcher{}, nil
}
-
-type HelmReleaseMonitor interface {
- IsReleased(namespace, name string) (bool, error)
-}
-
-type realHelmReleaseMonitor struct {
- d dynamic.Interface
-}
-
-func (m *realHelmReleaseMonitor) IsReleased(namespace, name string) (bool, error) {
- ctx := context.Background()
- res, err := m.d.Resource(schema.GroupVersionResource{"helm.toolkit.fluxcd.io", "v2beta1", "helmreleases"}).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
- if err != nil {
- return false, err
- }
- b, err := res.MarshalJSON()
- if err != nil {
- return false, err
- }
- var hr helmRelease
- if err := json.Unmarshal(b, &hr); err != nil {
- return false, err
- }
- for _, c := range hr.Status.Conditions {
- if c.Type == "Ready" && c.Status == "True" {
- return true, nil
- }
- }
- return false, nil
-}
-
-func NewHelmReleaseMonitor(kubeconfig string) (HelmReleaseMonitor, error) {
- c, err := kube.NewKubeClient(kube.KubeConfigOpts{KubeConfigPath: kubeconfig})
- if err != nil {
- return nil, err
- }
- d := dynamic.New(c.RESTClient())
- return &realHelmReleaseMonitor{d}, nil
-}
diff --git a/core/installer/server/appmanager/server.go b/core/installer/server/appmanager/server.go
index b26fc85..80f076a 100644
--- a/core/installer/server/appmanager/server.go
+++ b/core/installer/server/appmanager/server.go
@@ -22,6 +22,7 @@
"github.com/giolekva/pcloud/core/installer/cluster"
"github.com/giolekva/pcloud/core/installer/server"
"github.com/giolekva/pcloud/core/installer/soft"
+ "github.com/giolekva/pcloud/core/installer/status"
"github.com/giolekva/pcloud/core/installer/tasks"
)
@@ -38,19 +39,21 @@
}
type Server struct {
- l sync.Locker
- port int
- ssClient soft.Client
- repo soft.RepoIO
- m *installer.AppManager
- r installer.AppRepository
- fr installer.AppRepository
- reconciler *tasks.FixedReconciler
- h installer.HelmReleaseMonitor
- cnc installer.ClusterNetworkConfigurator
- vpnAPIClient installer.VPNAPIClient
- tasks map[string]*taskForward
- tmpl tmplts
+ l sync.Locker
+ port int
+ ssClient soft.Client
+ repo soft.RepoIO
+ m *installer.AppManager
+ r installer.AppRepository
+ fr installer.AppRepository
+ reconciler *tasks.FixedReconciler
+ h status.ResourceMonitor
+ im *status.InstanceMonitor
+ cnc installer.ClusterNetworkConfigurator
+ vpnAPIClient installer.VPNAPIClient
+ tasks map[string]*taskForward
+ tmpl tmplts
+ idToResources map[string]map[string][]status.Resource
}
type tmplts struct {
@@ -104,7 +107,8 @@
r installer.AppRepository,
fr installer.AppRepository,
reconciler *tasks.FixedReconciler,
- h installer.HelmReleaseMonitor,
+ h status.ResourceMonitor,
+ im *status.InstanceMonitor,
cnc installer.ClusterNetworkConfigurator,
vpnAPIClient installer.VPNAPIClient,
) (*Server, error) {
@@ -113,19 +117,21 @@
return nil, err
}
return &Server{
- l: &sync.Mutex{},
- port: port,
- ssClient: ssClient,
- repo: repo,
- m: m,
- r: r,
- fr: fr,
- reconciler: reconciler,
- h: h,
- cnc: cnc,
- vpnAPIClient: vpnAPIClient,
- tasks: make(map[string]*taskForward),
- tmpl: tmpl,
+ l: &sync.Mutex{},
+ port: port,
+ ssClient: ssClient,
+ repo: repo,
+ m: m,
+ r: r,
+ fr: fr,
+ reconciler: reconciler,
+ h: h,
+ im: im,
+ cnc: cnc,
+ vpnAPIClient: vpnAPIClient,
+ tasks: make(map[string]*taskForward),
+ tmpl: tmpl,
+ idToResources: make(map[string]map[string][]status.Resource),
}, nil
}
@@ -142,7 +148,7 @@
r.HandleFunc("/api/instance/{slug}", s.handleInstance).Methods(http.MethodGet)
r.HandleFunc("/api/instance/{slug}/update", s.handleAppUpdate).Methods(http.MethodPost)
r.HandleFunc("/api/instance/{slug}/remove", s.handleAppRemove).Methods(http.MethodPost)
- r.HandleFunc("/api/tasks/{instanceId}", s.handleTaskStatusAPI).Methods(http.MethodGet)
+ r.HandleFunc("/api/instance/{instanceId}/status", s.handleInstanceStatusAPI).Methods(http.MethodGet)
r.HandleFunc("/api/dodo-app/{instanceId}", s.handleDodoAppUpdate).Methods(http.MethodPut)
r.HandleFunc("/api/dodo-app", s.handleDodoAppInstall).Methods(http.MethodPost)
r.HandleFunc("/clusters/{cluster}/servers/{server}/remove", s.handleClusterRemoveServer).Methods(http.MethodPost)
@@ -207,6 +213,24 @@
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
+ var toMonitor []status.Resource
+ s.idToResources[instanceId] = map[string][]status.Resource{}
+ for _, r := range rr.Helm {
+ resource := status.Resource{
+ Type: status.ResourceHelmRelease,
+ ResourceRef: status.ResourceRef{
+ Name: r.Name,
+ Namespace: r.Namespace,
+ },
+ }
+ toMonitor = append(toMonitor, resource)
+ if tmp, ok := s.idToResources[instanceId][r.Id]; ok {
+ s.idToResources[instanceId][r.Id] = append(tmp, resource)
+ } else {
+ s.idToResources[instanceId][r.Id] = []status.Resource{resource}
+ }
+ }
+ s.im.Monitor(instanceId, toMonitor)
var cfg dodoAppRendered
if err := json.NewDecoder(bytes.NewReader(rr.RenderedRaw)).Decode(&cfg); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -255,6 +279,24 @@
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
+ var toMonitor []status.Resource
+ s.idToResources[instanceId] = map[string][]status.Resource{}
+ for _, r := range rr.Helm {
+ resource := status.Resource{
+ Type: status.ResourceHelmRelease,
+ ResourceRef: status.ResourceRef{
+ Name: r.Name,
+ Namespace: r.Namespace,
+ },
+ }
+ toMonitor = append(toMonitor, resource)
+ if tmp, ok := s.idToResources[instanceId][r.Id]; ok {
+ s.idToResources[instanceId][r.Id] = append(tmp, resource)
+ } else {
+ s.idToResources[instanceId][r.Id] = []status.Resource{resource}
+ }
+ }
+ s.im.Monitor(instanceId, toMonitor)
t := tasks.NewInstallTask(s.h, func() (installer.ReleaseResources, error) {
if err == nil {
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
@@ -725,7 +767,69 @@
return ret
}
-func (s *Server) handleTaskStatusAPI(w http.ResponseWriter, r *http.Request) {
+type IdName struct {
+ Id string
+ Name string
+}
+
+type IdNameMap map[string]IdName
+
+type resourceOuts struct {
+ Outs map[string]struct {
+ PostgreSQL IdNameMap `json:"postgresql"`
+ MongoDB IdNameMap `json:"mongodb"`
+ Volume IdNameMap `json:"volume"`
+ Ingress IdNameMap `json:"ingress"`
+ } `json:"outs"`
+}
+
+type DodoResource struct {
+ Type string
+ Name string
+}
+
+type DodoResourceStatus struct {
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Status string `json:"status"`
+}
+
+func organize(raw []byte) (map[string]DodoResource, error) {
+ var outs resourceOuts
+ if err := json.NewDecoder(bytes.NewReader(raw)).Decode(&outs); err != nil {
+ return nil, err
+ }
+ ret := map[string]DodoResource{}
+ for _, out := range outs.Outs {
+ for _, r := range out.PostgreSQL {
+ ret[r.Id] = DodoResource{
+ Type: "postgresql",
+ Name: r.Name,
+ }
+ }
+ for _, r := range out.MongoDB {
+ ret[r.Id] = DodoResource{
+ Type: "mongodb",
+ Name: r.Name,
+ }
+ }
+ for _, r := range out.Volume {
+ ret[r.Id] = DodoResource{
+ Type: "volume",
+ Name: r.Name,
+ }
+ }
+ for _, r := range out.Ingress {
+ ret[r.Id] = DodoResource{
+ Type: "ingress",
+ Name: r.Name,
+ }
+ }
+ }
+ return ret, nil
+}
+
+func (s *Server) handleInstanceStatusAPI(w http.ResponseWriter, r *http.Request) {
s.l.Lock()
defer s.l.Unlock()
instanceId, ok := mux.Vars(r)["instanceId"]
@@ -733,16 +837,40 @@
http.Error(w, "empty slug", http.StatusBadRequest)
return
}
- t, ok := s.tasks[instanceId]
- if !ok {
- http.Error(w, "task not found", http.StatusInternalServerError)
+ statuses, err := s.im.Get(instanceId)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- if ok && t.task == nil {
- http.Error(w, "not found", http.StatusNotFound)
+ idStatus := map[string]status.Status{}
+ for id, resources := range s.idToResources[instanceId] {
+ st := status.StatusNoStatus
+ for _, resource := range resources {
+ if st < statuses[resource] {
+ st = statuses[resource]
+ }
+ }
+ idStatus[id] = st
+ }
+ s.repo.Pull()
+ rendered, err := s.m.AppRendered(instanceId)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- resources := extractResources(t.task)
+ idToResource, err := organize(rendered)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ resources := []DodoResourceStatus{}
+ for id, st := range idStatus {
+ resources = append(resources, DodoResourceStatus{
+ Type: idToResource[id].Type,
+ Name: idToResource[id].Name,
+ Status: status.StatusString(st),
+ })
+ }
json.NewEncoder(w).Encode(resources)
}
diff --git a/core/installer/status/background.go b/core/installer/status/background.go
new file mode 100644
index 0000000..bebffde
--- /dev/null
+++ b/core/installer/status/background.go
@@ -0,0 +1,49 @@
+package status
+
+import (
+ "sync"
+ "time"
+)
+
+type ste struct {
+ status Status
+ error error
+}
+
+type backgroundMonitor struct {
+ l sync.Locker
+ m ResourceMonitor
+ interval time.Duration
+ cache map[ResourceRef]ste
+}
+
+func NewBackgroundMonitor(m ResourceMonitor, interval time.Duration) ResourceMonitor {
+ return &backgroundMonitor{
+ &sync.Mutex{},
+ m,
+ interval,
+ map[ResourceRef]ste{},
+ }
+}
+
+func (m *backgroundMonitor) Get(ref ResourceRef) (Status, error) {
+ m.l.Lock()
+ defer m.l.Unlock()
+ if ret, ok := m.cache[ref]; ok {
+ return ret.status, ret.error
+ }
+ m.cache[ref] = ste{StatusNotFound, nil}
+ go func() {
+ for {
+ st, err := m.m.Get(ref)
+ m.l.Lock()
+ m.cache[ref] = ste{st, err}
+ m.l.Unlock()
+ if IsStatusTerminal(st) {
+ return
+ }
+ time.Sleep(m.interval)
+ }
+ }()
+ return StatusNotFound, nil
+}
diff --git a/core/installer/status/helm.go b/core/installer/status/helm.go
new file mode 100644
index 0000000..b2db1d4
--- /dev/null
+++ b/core/installer/status/helm.go
@@ -0,0 +1,67 @@
+package status
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/giolekva/pcloud/core/installer/kube"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+)
+
+type helmRelease struct {
+ Status struct {
+ Conditions []struct {
+ Type string `json:"type"`
+ Status string `json:"status"`
+ } `json:"conditions"`
+ } `json:"status,omitempty"`
+}
+
+type helmReleaseMonitor struct {
+ d dynamic.Interface
+}
+
+func (m *helmReleaseMonitor) Get(ref ResourceRef) (Status, error) {
+ ctx := context.Background()
+ res, err := m.d.Resource(
+ schema.GroupVersionResource{
+ Group: "helm.toolkit.fluxcd.io",
+ Version: "v2beta1",
+ Resource: "helmreleases",
+ },
+ ).Namespace(ref.Namespace).Get(ctx, ref.Name, metav1.GetOptions{})
+ if err != nil {
+ if errors.IsNotFound(err) {
+ return StatusNotFound, nil
+ }
+ return StatusNoStatus, err
+ }
+ b, err := res.MarshalJSON()
+ if err != nil {
+ return StatusNoStatus, err
+ }
+ var hr helmRelease
+ if err := json.Unmarshal(b, &hr); err != nil {
+ return StatusNoStatus, err
+ }
+ // TODO(gio): check more thoroughly
+ for _, c := range hr.Status.Conditions {
+ if c.Type == "Ready" && c.Status == "True" {
+ return StatusSuccess, nil
+ }
+ }
+ return StatusProcessing, nil
+}
+
+func NewHelmReleaseMonitor(kubeconfig string) (ResourceMonitor, error) {
+ c, err := kube.NewKubeClient(kube.KubeConfigOpts{KubeConfigPath: kubeconfig})
+ if err != nil {
+ return nil, err
+ }
+ d := dynamic.New(c.RESTClient())
+ return &helmReleaseMonitor{d}, nil
+}
diff --git a/core/installer/status/instance.go b/core/installer/status/instance.go
new file mode 100644
index 0000000..285f68c
--- /dev/null
+++ b/core/installer/status/instance.go
@@ -0,0 +1,28 @@
+package status
+
+type InstanceMonitor struct {
+ m Monitor
+ instances map[string][]Resource
+}
+
+func NewInstanceMonitor(m Monitor) *InstanceMonitor {
+ return &InstanceMonitor{
+ m: m,
+ instances: map[string][]Resource{},
+ }
+}
+
+func (m *InstanceMonitor) Monitor(id string, resources []Resource) {
+ m.instances[id] = resources
+}
+
+func (m *InstanceMonitor) Get(id string) (ret map[Resource]Status, err error) {
+ ret = map[Resource]Status{}
+ for _, r := range m.instances[id] {
+ ret[r], err = m.m.Get(r)
+ if err != nil {
+ break
+ }
+ }
+ return
+}
diff --git a/core/installer/status/status.go b/core/installer/status/status.go
new file mode 100644
index 0000000..ebec72f
--- /dev/null
+++ b/core/installer/status/status.go
@@ -0,0 +1,87 @@
+package status
+
+import (
+ "fmt"
+)
+
+type Status int
+
+const (
+ StatusNoStatus Status = iota
+ StatusNotFound
+ StatusPending
+ StatusProcessing
+ StatusSuccess
+ StatusFailure
+)
+
+func IsStatusTerminal(s Status) bool {
+ return s == StatusSuccess || s == StatusFailure
+}
+
+func StatusString(s Status) string {
+ switch s {
+ case StatusNoStatus:
+ return "no_status"
+ case StatusNotFound:
+ return "not_found"
+ case StatusPending:
+ return "pending"
+ case StatusProcessing:
+ return "processing"
+ case StatusSuccess:
+ return "success"
+ case StatusFailure:
+ return "failure"
+ default:
+ panic("MUST NOT REACH!")
+ }
+}
+
+type ResourceType int
+
+const (
+ ResourceHelmRelease ResourceType = iota
+)
+
+type ResourceRef struct {
+ Name string
+ Namespace string
+}
+
+type Resource struct {
+ ResourceRef
+ Type ResourceType
+}
+
+type Monitor interface {
+ Get(r Resource) (Status, error)
+}
+
+type ResourceMonitor interface {
+ Get(r ResourceRef) (Status, error)
+}
+
+type delegatingMonitor struct {
+ m map[ResourceType]ResourceMonitor
+}
+
+func NewDelegatingMonitor(kubeconfig string) (Monitor, error) {
+ helm, err := NewHelmReleaseMonitor(kubeconfig)
+ if err != nil {
+ return nil, err
+ }
+ return delegatingMonitor{
+ map[ResourceType]ResourceMonitor{
+ ResourceHelmRelease: helm,
+ },
+ }, nil
+}
+
+func (m delegatingMonitor) Get(r Resource) (Status, error) {
+ if rm, ok := m.m[r.Type]; !ok {
+ return StatusNoStatus, fmt.Errorf("unknown resource type: %d", r.Type)
+ } else {
+ return rm.Get(r.ResourceRef)
+ }
+}
diff --git a/core/installer/tasks/install.go b/core/installer/tasks/install.go
index b704c68..c3a0086 100644
--- a/core/installer/tasks/install.go
+++ b/core/installer/tasks/install.go
@@ -7,6 +7,7 @@
"github.com/giolekva/pcloud/core/installer"
"github.com/giolekva/pcloud/core/installer/cluster"
"github.com/giolekva/pcloud/core/installer/soft"
+ "github.com/giolekva/pcloud/core/installer/status"
)
type InstallFunc func() (installer.ReleaseResources, error)
@@ -23,7 +24,7 @@
d.t = append(d.t, t)
}
-func NewInstallTask(mon installer.HelmReleaseMonitor, fn InstallFunc) Task {
+func NewInstallTask(mon status.ResourceMonitor, fn InstallFunc) Task {
d := &dynamicTaskSlice{t: []Task{}}
var rr installer.ReleaseResources
done := make(chan error)
diff --git a/core/installer/tasks/release.go b/core/installer/tasks/release.go
index 059d1ee..1bd0d6e 100644
--- a/core/installer/tasks/release.go
+++ b/core/installer/tasks/release.go
@@ -4,9 +4,10 @@
"time"
"github.com/giolekva/pcloud/core/installer"
+ "github.com/giolekva/pcloud/core/installer/status"
)
-func NewMonitorReleaseTasks(mon installer.HelmReleaseMonitor, rr installer.ReleaseResources) []Task {
+func NewMonitorReleaseTasks(mon status.ResourceMonitor, rr installer.ReleaseResources) []Task {
var t []Task
for _, h := range rr.Helm {
t = append(t, newMonitorHelm(mon, h))
@@ -14,11 +15,11 @@
return t
}
-func NewMonitorRelease(mon installer.HelmReleaseMonitor, rr installer.ReleaseResources) Task {
+func NewMonitorRelease(mon status.ResourceMonitor, rr installer.ReleaseResources) Task {
return newConcurrentParentTask("Monitor", true, NewMonitorReleaseTasks(mon, rr)...)
}
-func newMonitorHelm(mon installer.HelmReleaseMonitor, h installer.Resource) Task {
+func newMonitorHelm(mon status.ResourceMonitor, h installer.Resource) Task {
rType := h.Annotations["dodo.cloud/resource-type"]
var name string
switch rType {
@@ -36,11 +37,18 @@
name = h.Annotations["dodo.cloud/resource.service.name"]
}
t := newResourceLeafTask(h.Info, &ResourceId{rType, name}, func() error {
+ ref := status.ResourceRef{
+ Name: h.Name,
+ Namespace: h.Namespace,
+ }
for {
- if ok, err := mon.IsReleased(h.Namespace, h.Name); err == nil && ok {
- break
+ if s, err := mon.Get(ref); err != nil {
+ return err
+ } else if status.IsStatusTerminal(s) {
+ return nil
+ } else {
+ time.Sleep(5 * time.Second)
}
- time.Sleep(5 * time.Second)
}
return nil
})