resource renderer
diff --git a/core/resource-renderer/config/crd/bases/dodo.cloud.dodo.cloud_resourcerenderers.yaml b/core/resource-renderer/config/crd/bases/dodo.cloud.dodo.cloud_resourcerenderers.yaml
new file mode 100644
index 0000000..83a80c5
--- /dev/null
+++ b/core/resource-renderer/config/crd/bases/dodo.cloud.dodo.cloud_resourcerenderers.yaml
@@ -0,0 +1,54 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.9.2
+  creationTimestamp: null
+  name: resourcerenderers.dodo.cloud.dodo.cloud
+spec:
+  group: dodo.cloud.dodo.cloud
+  names:
+    kind: ResourceRenderer
+    listKind: ResourceRendererList
+    plural: resourcerenderers
+    singular: resourcerenderer
+  scope: Namespaced
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        description: ResourceRenderer is the Schema for the resourcerenderers API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: ResourceRendererSpec defines the desired state of ResourceRenderer
+            properties:
+              resourceTemplate:
+                type: string
+              secretName:
+                description: Name of the Secret referenced by this ResourceRenderer
+                  (see also secretNamespace). Update the description in resourcerenderer_types.go.
+                type: string
+              secretNamespace:
+                type: string
+            type: object
+          status:
+            description: ResourceRendererStatus defines the observed state of ResourceRenderer
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/core/resource-renderer/config/crd/kustomization.yaml b/core/resource-renderer/config/crd/kustomization.yaml
new file mode 100644
index 0000000..2942988
--- /dev/null
+++ b/core/resource-renderer/config/crd/kustomization.yaml
@@ -0,0 +1,21 @@
+# This kustomization.yaml is not intended to be run by itself,
+# since it depends on service name and namespace that are out of this kustomize package.
+# It should be run by config/default
+resources:
+- bases/dodo.cloud.dodo.cloud_resourcerenderers.yaml
+#+kubebuilder:scaffold:crdkustomizeresource
+
+patchesStrategicMerge:
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
+# patches here are for enabling the conversion webhook for each CRD
+#- patches/webhook_in_resourcerenderers.yaml
+#+kubebuilder:scaffold:crdkustomizewebhookpatch
+
+# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
+# patches here are for enabling the CA injection for each CRD
+#- patches/cainjection_in_resourcerenderers.yaml
+#+kubebuilder:scaffold:crdkustomizecainjectionpatch
+
+# the following config is for teaching kustomize how to do kustomization for CRDs.
+configurations:
+- kustomizeconfig.yaml
diff --git a/core/resource-renderer/config/crd/kustomizeconfig.yaml b/core/resource-renderer/config/crd/kustomizeconfig.yaml
new file mode 100644
index 0000000..ec5c150
--- /dev/null
+++ b/core/resource-renderer/config/crd/kustomizeconfig.yaml
@@ -0,0 +1,19 @@
+# This file is for teaching kustomize how to substitute name and namespace reference in CRD
+nameReference:
+- kind: Service
+  version: v1
+  fieldSpecs:
+  - kind: CustomResourceDefinition
+    version: v1
+    group: apiextensions.k8s.io
+    path: spec/conversion/webhook/clientConfig/service/name
+
+namespace:
+- kind: CustomResourceDefinition
+  version: v1
+  group: apiextensions.k8s.io
+  path: spec/conversion/webhook/clientConfig/service/namespace
+  create: false
+
+varReference:
+- path: metadata/annotations
diff --git a/core/resource-renderer/config/crd/patches/cainjection_in_resourcerenderers.yaml b/core/resource-renderer/config/crd/patches/cainjection_in_resourcerenderers.yaml
new file mode 100644
index 0000000..526bd3b
--- /dev/null
+++ b/core/resource-renderer/config/crd/patches/cainjection_in_resourcerenderers.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+  name: resourcerenderers.dodo.cloud.dodo.cloud
diff --git a/core/resource-renderer/config/crd/patches/webhook_in_resourcerenderers.yaml b/core/resource-renderer/config/crd/patches/webhook_in_resourcerenderers.yaml
new file mode 100644
index 0000000..72a1f5b
--- /dev/null
+++ b/core/resource-renderer/config/crd/patches/webhook_in_resourcerenderers.yaml
@@ -0,0 +1,16 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: resourcerenderers.dodo.cloud.dodo.cloud
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        service:
+          namespace: system
+          name: webhook-service
+          path: /convert
+      conversionReviewVersions:
+      - v1
diff --git a/core/resource-renderer/config/default/kustomization.yaml b/core/resource-renderer/config/default/kustomization.yaml
new file mode 100644
index 0000000..f550b74
--- /dev/null
+++ b/core/resource-renderer/config/default/kustomization.yaml
@@ -0,0 +1,74 @@
+# Adds namespace to all resources.
+namespace: resource-renderer-system
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match the prefix (text before '-') of the namespace
+# field above.
+namePrefix: resource-renderer-
+
+# Labels to add to all resources and selectors.
+#commonLabels:
+#  someName: someValue
+
+bases:
+- ../crd
+- ../rbac
+- ../manager
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
+# crd/kustomization.yaml
+#- ../webhook
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
+#- ../certmanager
+# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
+#- ../prometheus
+
+patchesStrategicMerge:
+# Protect the /metrics endpoint by putting it behind auth.
+# If you want your controller-manager to expose the /metrics
+# endpoint w/o any authn/z, please comment the following line.
+- manager_auth_proxy_patch.yaml
+
+# Mount the controller config file for loading manager configurations
+# through a ComponentConfig type
+#- manager_config_patch.yaml
+
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
+# crd/kustomization.yaml
+#- manager_webhook_patch.yaml
+
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
+# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
+# 'CERTMANAGER' needs to be enabled to use ca injection
+#- webhookcainjection_patch.yaml
+
+# the following config is for teaching kustomize how to do var substitution
+vars:
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+#  objref:
+#    kind: Certificate
+#    group: cert-manager.io
+#    version: v1
+#    name: serving-cert # this name should match the one in certificate.yaml
+#  fieldref:
+#    fieldpath: metadata.namespace
+#- name: CERTIFICATE_NAME
+#  objref:
+#    kind: Certificate
+#    group: cert-manager.io
+#    version: v1
+#    name: serving-cert # this name should match the one in certificate.yaml
+#- name: SERVICE_NAMESPACE # namespace of the service
+#  objref:
+#    kind: Service
+#    version: v1
+#    name: webhook-service
+#  fieldref:
+#    fieldpath: metadata.namespace
+#- name: SERVICE_NAME
+#  objref:
+#    kind: Service
+#    version: v1
+#    name: webhook-service
diff --git a/core/resource-renderer/config/default/manager_auth_proxy_patch.yaml b/core/resource-renderer/config/default/manager_auth_proxy_patch.yaml
new file mode 100644
index 0000000..cec149a
--- /dev/null
+++ b/core/resource-renderer/config/default/manager_auth_proxy_patch.yaml
@@ -0,0 +1,39 @@
+# This patch injects a sidecar container which is an HTTP proxy for the
+# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: controller-manager
+  namespace: system
+spec:
+  template:
+    spec:
+      containers:
+      - name: kube-rbac-proxy
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+              - "ALL"
+        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
+        args:
+        - "--secure-listen-address=0.0.0.0:8443"
+        - "--upstream=http://127.0.0.1:8080/"
+        - "--logtostderr=true"
+        - "--v=0"
+        ports:
+        - containerPort: 8443
+          protocol: TCP
+          name: https
+        resources:
+          limits:
+            cpu: 500m
+            memory: 128Mi
+          requests:
+            cpu: 5m
+            memory: 64Mi
+      - name: manager
+        args:
+        - "--health-probe-bind-address=:8081"
+        - "--metrics-bind-address=127.0.0.1:8080"
+        - "--leader-elect"
diff --git a/core/resource-renderer/config/default/manager_config_patch.yaml b/core/resource-renderer/config/default/manager_config_patch.yaml
new file mode 100644
index 0000000..6c40015
--- /dev/null
+++ b/core/resource-renderer/config/default/manager_config_patch.yaml
@@ -0,0 +1,20 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: controller-manager
+  namespace: system
+spec:
+  template:
+    spec:
+      containers:
+      - name: manager
+        args:
+        - "--config=controller_manager_config.yaml"
+        volumeMounts:
+        - name: manager-config
+          mountPath: /controller_manager_config.yaml
+          subPath: controller_manager_config.yaml
+      volumes:
+      - name: manager-config
+        configMap:
+          name: manager-config
diff --git a/core/resource-renderer/config/manager/controller_manager_config.yaml b/core/resource-renderer/config/manager/controller_manager_config.yaml
new file mode 100644
index 0000000..8d7e816
--- /dev/null
+++ b/core/resource-renderer/config/manager/controller_manager_config.yaml
@@ -0,0 +1,21 @@
+apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
+kind: ControllerManagerConfig
+health:
+  healthProbeBindAddress: :8081
+metrics:
+  bindAddress: 127.0.0.1:8080
+webhook:
+  port: 9443
+leaderElection:
+  leaderElect: true
+  resourceName: 798a733c.dodo.cloud
+# leaderElectionReleaseOnCancel defines if the leader should step down voluntarily
+# when the Manager ends. This requires the binary to immediately end when the
+# Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
+# speeds up voluntary leader transitions as the new leader doesn't have to wait
+# LeaseDuration time first.
+# In the default scaffold provided, the program ends immediately after
+# the manager stops, so it would be fine to enable this option. However,
+# if you are doing or intend to do any operation such as performing cleanups
+# after the manager stops, then its usage might be unsafe.
+# leaderElectionReleaseOnCancel: true
diff --git a/core/resource-renderer/config/manager/kustomization.yaml b/core/resource-renderer/config/manager/kustomization.yaml
new file mode 100644
index 0000000..2bcd3ee
--- /dev/null
+++ b/core/resource-renderer/config/manager/kustomization.yaml
@@ -0,0 +1,10 @@
+resources:
+- manager.yaml
+
+generatorOptions:
+  disableNameSuffixHash: true
+
+configMapGenerator:
+- name: manager-config
+  files:
+  - controller_manager_config.yaml
diff --git a/core/resource-renderer/config/manager/manager.yaml b/core/resource-renderer/config/manager/manager.yaml
new file mode 100644
index 0000000..878ad48
--- /dev/null
+++ b/core/resource-renderer/config/manager/manager.yaml
@@ -0,0 +1,70 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: controller-manager
+  namespace: system
+  labels:
+    control-plane: controller-manager
+spec:
+  selector:
+    matchLabels:
+      control-plane: controller-manager
+  replicas: 1
+  template:
+    metadata:
+      annotations:
+        kubectl.kubernetes.io/default-container: manager
+      labels:
+        control-plane: controller-manager
+    spec:
+      securityContext:
+        runAsNonRoot: true
+        # TODO(user): For common cases that do not require escalating privileges
+        # it is recommended to ensure that all your Pods/Containers are restrictive.
+        # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
+        # Please uncomment the following code if your project does NOT have to work on old Kubernetes
+        # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
+        # seccompProfile:
+        #   type: RuntimeDefault
+      containers:
+      - command:
+        - /manager
+        args:
+        - --leader-elect
+        image: controller:latest
+        name: manager
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+              - "ALL"
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8081
+          initialDelaySeconds: 15
+          periodSeconds: 20
+        readinessProbe:
+          httpGet:
+            path: /readyz
+            port: 8081
+          initialDelaySeconds: 5
+          periodSeconds: 10
+        # TODO(user): Configure the resources accordingly based on the project requirements.
+        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+        resources:
+          limits:
+            cpu: 500m
+            memory: 128Mi
+          requests:
+            cpu: 10m
+            memory: 64Mi
+      serviceAccountName: controller-manager
+      terminationGracePeriodSeconds: 10
diff --git a/core/resource-renderer/config/prometheus/kustomization.yaml b/core/resource-renderer/config/prometheus/kustomization.yaml
new file mode 100644
index 0000000..ed13716
--- /dev/null
+++ b/core/resource-renderer/config/prometheus/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- monitor.yaml
diff --git a/core/resource-renderer/config/prometheus/monitor.yaml b/core/resource-renderer/config/prometheus/monitor.yaml
new file mode 100644
index 0000000..d19136a
--- /dev/null
+++ b/core/resource-renderer/config/prometheus/monitor.yaml
@@ -0,0 +1,20 @@
+
+# Prometheus Monitor Service (Metrics)
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: controller-manager-metrics-monitor
+  namespace: system
+spec:
+  endpoints:
+    - path: /metrics
+      port: https
+      scheme: https
+      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+      tlsConfig:
+        insecureSkipVerify: true
+  selector:
+    matchLabels:
+      control-plane: controller-manager
diff --git a/core/resource-renderer/config/rbac/auth_proxy_client_clusterrole.yaml b/core/resource-renderer/config/rbac/auth_proxy_client_clusterrole.yaml
new file mode 100644
index 0000000..51a75db
--- /dev/null
+++ b/core/resource-renderer/config/rbac/auth_proxy_client_clusterrole.yaml
@@ -0,0 +1,9 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metrics-reader
+rules:
+- nonResourceURLs:
+  - "/metrics"
+  verbs:
+  - get
diff --git a/core/resource-renderer/config/rbac/auth_proxy_role.yaml b/core/resource-renderer/config/rbac/auth_proxy_role.yaml
new file mode 100644
index 0000000..80e1857
--- /dev/null
+++ b/core/resource-renderer/config/rbac/auth_proxy_role.yaml
@@ -0,0 +1,17 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: proxy-role
+rules:
+- apiGroups:
+  - authentication.k8s.io
+  resources:
+  - tokenreviews
+  verbs:
+  - create
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - subjectaccessreviews
+  verbs:
+  - create
diff --git a/core/resource-renderer/config/rbac/auth_proxy_role_binding.yaml b/core/resource-renderer/config/rbac/auth_proxy_role_binding.yaml
new file mode 100644
index 0000000..ec7acc0
--- /dev/null
+++ b/core/resource-renderer/config/rbac/auth_proxy_role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: proxy-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: proxy-role
+subjects:
+- kind: ServiceAccount
+  name: controller-manager
+  namespace: system
diff --git a/core/resource-renderer/config/rbac/auth_proxy_service.yaml b/core/resource-renderer/config/rbac/auth_proxy_service.yaml
new file mode 100644
index 0000000..71f1797
--- /dev/null
+++ b/core/resource-renderer/config/rbac/auth_proxy_service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: controller-manager-metrics-service
+  namespace: system
+spec:
+  ports:
+  - name: https
+    port: 8443
+    protocol: TCP
+    targetPort: https
+  selector:
+    control-plane: controller-manager
diff --git a/core/resource-renderer/config/rbac/kustomization.yaml b/core/resource-renderer/config/rbac/kustomization.yaml
new file mode 100644
index 0000000..731832a
--- /dev/null
+++ b/core/resource-renderer/config/rbac/kustomization.yaml
@@ -0,0 +1,18 @@
+resources:
+# All RBAC will be applied under this service account in
+# the deployment namespace. You may comment out this resource
+# if your manager will use a service account that exists at
+# runtime. Be sure to update RoleBinding and ClusterRoleBinding
+# subjects if changing service account names.
+- service_account.yaml
+- role.yaml
+- role_binding.yaml
+- leader_election_role.yaml
+- leader_election_role_binding.yaml
+# Comment the following 4 lines if you want to disable
+# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
+# which protects your /metrics endpoint.
+- auth_proxy_service.yaml
+- auth_proxy_role.yaml
+- auth_proxy_role_binding.yaml
+- auth_proxy_client_clusterrole.yaml
diff --git a/core/resource-renderer/config/rbac/leader_election_role.yaml b/core/resource-renderer/config/rbac/leader_election_role.yaml
new file mode 100644
index 0000000..4190ec8
--- /dev/null
+++ b/core/resource-renderer/config/rbac/leader_election_role.yaml
@@ -0,0 +1,37 @@
+# permissions to do leader election.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: leader-election-role
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
diff --git a/core/resource-renderer/config/rbac/leader_election_role_binding.yaml b/core/resource-renderer/config/rbac/leader_election_role_binding.yaml
new file mode 100644
index 0000000..1d1321e
--- /dev/null
+++ b/core/resource-renderer/config/rbac/leader_election_role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: leader-election-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: leader-election-role
+subjects:
+- kind: ServiceAccount
+  name: controller-manager
+  namespace: system
diff --git a/core/resource-renderer/config/rbac/resourcerenderer_editor_role.yaml b/core/resource-renderer/config/rbac/resourcerenderer_editor_role.yaml
new file mode 100644
index 0000000..a9bdd9b
--- /dev/null
+++ b/core/resource-renderer/config/rbac/resourcerenderer_editor_role.yaml
@@ -0,0 +1,24 @@
+# permissions for end users to edit resourcerenderers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: resourcerenderer-editor-role
+rules:
+- apiGroups:
+  - dodo.cloud.dodo.cloud
+  resources:
+  - resourcerenderers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - dodo.cloud.dodo.cloud
+  resources:
+  - resourcerenderers/status
+  verbs:
+  - get
diff --git a/core/resource-renderer/config/rbac/resourcerenderer_viewer_role.yaml b/core/resource-renderer/config/rbac/resourcerenderer_viewer_role.yaml
new file mode 100644
index 0000000..26d0020
--- /dev/null
+++ b/core/resource-renderer/config/rbac/resourcerenderer_viewer_role.yaml
@@ -0,0 +1,20 @@
+# permissions for end users to view resourcerenderers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: resourcerenderer-viewer-role
+rules:
+- apiGroups:
+  - dodo.cloud.dodo.cloud
+  resources:
+  - resourcerenderers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - dodo.cloud.dodo.cloud
+  resources:
+  - resourcerenderers/status
+  verbs:
+  - get
diff --git a/core/resource-renderer/config/rbac/role.yaml b/core/resource-renderer/config/rbac/role.yaml
new file mode 100644
index 0000000..9f039d6
--- /dev/null
+++ b/core/resource-renderer/config/rbac/role.yaml
@@ -0,0 +1,57 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: manager-role
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - dodo.cloud.dodo.cloud
+  resources:
+  - resourcerenderers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - dodo.cloud.dodo.cloud
+  resources:
+  - resourcerenderers/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - dodo.cloud.dodo.cloud
+  resources:
+  - resourcerenderers/status
+  verbs:
+  - get
+  - patch
+  - update
diff --git a/core/resource-renderer/config/rbac/role_binding.yaml b/core/resource-renderer/config/rbac/role_binding.yaml
new file mode 100644
index 0000000..2070ede
--- /dev/null
+++ b/core/resource-renderer/config/rbac/role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: manager-role
+subjects:
+- kind: ServiceAccount
+  name: controller-manager
+  namespace: system
diff --git a/core/resource-renderer/config/rbac/service_account.yaml b/core/resource-renderer/config/rbac/service_account.yaml
new file mode 100644
index 0000000..7cd6025
--- /dev/null
+++ b/core/resource-renderer/config/rbac/service_account.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: controller-manager
+  namespace: system
diff --git a/core/resource-renderer/config/samples/dodo.cloud_v1_resourcerenderer.yaml b/core/resource-renderer/config/samples/dodo.cloud_v1_resourcerenderer.yaml
new file mode 100644
index 0000000..7bde7b1
--- /dev/null
+++ b/core/resource-renderer/config/samples/dodo.cloud_v1_resourcerenderer.yaml
@@ -0,0 +1,303 @@
+apiVersion: dodo.cloud.dodo.cloud/v1
+kind: ResourceRenderer
+metadata:
+  name: resourcerenderer-sample
+  namespace: test
+spec:
+  secretName: oauth2-client-headscale
+  secretNamespace: lekva-app-headscale
+  resourceTemplate: |
+    apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      name: config
+      namespace: test
+    data:
+      config.yaml: |
+        # headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
+        #
+        # - `/etc/headscale`
+        # - `~/.headscale`
+        # - current working directory
+
+        # The url clients will connect to.
+        # Typically this will be a domain like:
+        #
+        # https://myheadscale.example.com:443
+        #
+        server_url: https://.Values.domain
+
+        # Address to listen to / bind to on the server
+        #
+        # For production:
+        listen_addr: 0.0.0.0:8080
+        # listen_addr: 127.0.0.1:8080
+
+        # Address to listen to /metrics, you may want
+        # to keep this endpoint private to your internal
+        # network
+        #
+        metrics_listen_addr: 0.0.0.0:9090
+        # metrics_listen_addr: 127.0.0.1:9090
+
+        # Address to listen for gRPC.
+        # gRPC is used for controlling a headscale server
+        # remotely with the CLI
+        # Note: Remote access _only_ works if you have
+        # valid certificates.
+        #
+        # For production:
+        grpc_listen_addr: 0.0.0.0:50443
+        # grpc_listen_addr: 127.0.0.1:50443
+
+        # Allow the gRPC admin interface to run in INSECURE
+        # mode. This is not recommended as the traffic will
+        # be unencrypted. Only enable if you know what you
+        # are doing.
+        grpc_allow_insecure: false
+
+        # Private key used to encrypt the traffic between headscale
+        # and Tailscale clients.
+        # The private key file will be autogenerated if it's missing.
+        #
+        # For production:
+        # /var/lib/headscale/private.key
+        private_key_path: /headscale/data/private.key
+
+        # The Noise section includes specific configuration for the
+        # TS2021 Noise protocol
+        noise:
+          # The Noise private key is used to encrypt the
+          # traffic between headscale and Tailscale clients when
+          # using the new Noise-based protocol. It must be different
+          # from the legacy private key.
+          #
+          # For production:
+          # private_key_path: /var/lib/headscale/noise_private.key
+          private_key_path: /headscale/data/noise_private.key
+
+        # List of IP prefixes to allocate tailaddresses from.
+        # Each prefix consists of either an IPv4 or IPv6 address,
+        # and the associated prefix length, delimited by a slash.
+        ip_prefixes:
+          # - fd7a:115c:a1e0::/48
+          - 100.64.0.0/10
+
+        # DERP is a relay system that Tailscale uses when a direct
+        # connection cannot be established.
+        # https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
+        #
+        # headscale needs a list of DERP servers that can be presented
+        # to the clients.
+        derp:
+          server:
+            # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
+            # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
+            enabled: false
+
+            # Region ID to use for the embedded DERP server.
+            # The local DERP prevails if the region ID collides with other region ID coming from
+            # the regular DERP config.
+            region_id: 999
+
+            # Region code and name are displayed in the Tailscale UI to identify a DERP region
+            region_code: "headscale"
+            region_name: "Headscale Embedded DERP"
+
+            # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
+            # When the embedded DERP server is enabled stun_listen_addr MUST be defined.
+            #
+            # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
+            stun_listen_addr: "0.0.0.0:3478"
+
+          # List of externally available DERP maps encoded in JSON
+          urls:
+            - https://controlplane.tailscale.com/derpmap/default
+
+          # Locally available DERP map files encoded in YAML
+          #
+          # This option is mostly interesting for people hosting
+          # their own DERP servers:
+          # https://tailscale.com/kb/1118/custom-derp-servers/
+          #
+          # paths:
+          #   - /etc/headscale/derp-example.yaml
+          paths: []
+
+          # If enabled, a worker will be set up to periodically
+          # refresh the given sources and update the derpmap
+          # will be set up.
+          auto_update_enabled: true
+
+          # How often should we check for DERP updates?
+          update_frequency: 24h
+
+        # Disables the automatic check for headscale updates on startup
+        disable_check_updates: true
+
+        # Time before an inactive ephemeral node is deleted?
+        ephemeral_node_inactivity_timeout: 30m
+
+        # Period to check for node updates within the tailnet. A value too low will severely affect
+        # CPU consumption of Headscale. A value too high (over 60s) will cause problems
+        # for the nodes, as they won't get updates or keep alive messages frequently enough.
+        # In case of doubts, do not touch the default 10s.
+        node_update_check_interval: 10s
+
+        # SQLite config
+        db_type: sqlite3
+
+        # For production:
+        # db_path: /var/lib/headscale/db.sqlite
+        db_path: /headscale/data/db.sqlite
+
+        # # Postgres config
+        # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
+        # db_type: postgres
+        # db_host: localhost
+        # db_port: 5432
+        # db_name: headscale
+        # db_user: foo
+        # db_pass: bar
+
+        # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
+        # in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
+        # db_ssl: false
+
+        ### TLS configuration
+        #
+        ## Let's encrypt / ACME
+        #
+        # headscale supports automatically requesting and setting up
+        # TLS for a domain with Let's Encrypt.
+        #
+        # URL to ACME directory
+        acme_url: https://acme-v02.api.letsencrypt.org/directory
+
+        # Email to register with ACME provider
+        acme_email: ""
+
+        # Domain name to request a TLS certificate for:
+        tls_letsencrypt_hostname: ""
+
+        # Path to store certificates and metadata needed by
+        # letsencrypt
+        # For production:
+        # tls_letsencrypt_cache_dir: /var/lib/headscale/cache
+        tls_letsencrypt_cache_dir: ./cache
+
+        # Type of ACME challenge to use, currently supported types:
+        # HTTP-01 or TLS-ALPN-01
+        # See [docs/tls.md](docs/tls.md) for more information
+        tls_letsencrypt_challenge_type: HTTP-01
+        # When HTTP-01 challenge is chosen, letsencrypt must set up a
+        # verification endpoint, and it will be listening on:
+        # :http = port 80
+        tls_letsencrypt_listen: ":http"
+
+        ## Use already defined certificates:
+        tls_cert_path: ""
+        tls_key_path: ""
+
+        log:
+          # Output formatting for logs: text or json
+          format: text
+          level: info
+
+        # Path to a file containing ACL policies.
+        # ACLs can be defined as YAML or HUJSON.
+        # https://tailscale.com/kb/1018/acls/
+        acl_policy_path: ""
+
+        ## DNS
+        #
+        # headscale supports Tailscale's DNS configuration and MagicDNS.
+        # Please have a look at their KB to better understand the concepts:
+        #
+        # - https://tailscale.com/kb/1054/dns/
+        # - https://tailscale.com/kb/1081/magicdns/
+        # - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
+        #
+        dns_config:
+          # Whether to prefer using Headscale provided DNS or use local.
+          override_local_dns: true
+
+          # List of DNS servers to expose to clients.
+          nameservers:
+            - 8.8.8.8
+            - 1.1.1.1
+
+          # NextDNS (see https://tailscale.com/kb/1218/nextdns/).
+          # "abc123" is example NextDNS ID, replace with yours.
+          #
+          # With metadata sharing:
+          # nameservers:
+          #   - https://dns.nextdns.io/abc123
+          #
+          # Without metadata sharing:
+          # nameservers:
+          #   - 2a07:a8c0::ab:c123
+          #   - 2a07:a8c1::ab:c123
+
+          # Split DNS (see https://tailscale.com/kb/1054/dns/),
+          # list of search domains and the DNS to query for each one.
+          #
+          # restricted_nameservers:
+          #   foo.bar.com:
+          #     - 1.1.1.1
+          #   darp.headscale.net:
+          #     - 1.1.1.1
+          #     - 8.8.8.8
+
+          # Search domains to inject.
+          domains: []
+
+          # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
+          # Only works if there is at least a nameserver defined.
+          magic_dns: true
+
+          # Defines the base domain to create the hostnames for MagicDNS.
+          # `base_domain` must be an FQDN, without the trailing dot.
+          # The FQDN of the hosts will be
+          # `hostname.namespace.base_domain` (e.g., _myhost.mynamespace.example.com_).
+          base_domain: "{{ .Values.internalBaseDomain }}"
+
+        # Unix socket used for the CLI to connect without authentication
+        # Note: for production you will want to set this to something like:
+        # unix_socket: /var/run/headscale.sock
+        unix_socket: ./headscale.sock
+        unix_socket_permission: "0770"
+        #
+        # headscale supports experimental OpenID connect support,
+        # it is still being tested and might have some bugs, please
+        # help us test it.
+        # OpenID Connect
+        oidc:
+          only_start_if_oidc_is_available: false
+          issuer: "{{ .Values.oauth2.hydraPublic }}"
+          client_id: "{{ .client_id }}"
+          client_secret: "{{ .client_secret }}"
+          scope: ["openid", "profile", "email"]
+          extra_params:
+            domain_hint: lekva.me
+          allowed_domains:
+            - lekva.me
+          # allowed_groups:
+          #   - /headscale
+          # allowed_users:
+          #   - alice@example.com
+          strip_email_domain: true
+
+        # Logtail configuration
+        # Logtail is Tailscale's logging and auditing infrastructure, it allows the control panel
+        # to instruct tailscale nodes to log their activity to a remote server.
+        logtail:
+          # Enable logtail for this headscale's clients.
+          # As there is currently no support for overriding the log server in headscale, this is
+          # disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
+          enabled: false
+
+        # Enabling this option makes devices prefer a random port for WireGuard traffic over the
+        # default static port 41641. This option is intended as a workaround for some buggy
+        # firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
+        randomize_client_port: true