Installer: nebula controller
diff --git a/charts/auth/lighthouse.yaml b/charts/auth/lighthouse.yaml
deleted file mode 100644
index 66d3534..0000000
--- a/charts/auth/lighthouse.yaml
+++ /dev/null
@@ -1,246 +0,0 @@
-# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
-# Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
-
-# PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
-pki:
- # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
- ##ca: /etc/nebula/ca/ca.crt
- ca: /etc/nebula/lighthouse/ca.crt
- cert: /etc/nebula/lighthouse/host.crt
- key: /etc/nebula/lighthouse/host.key
- #blocklist is a list of certificate fingerprints that we will refuse to talk to
- #blocklist:
- # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
-
-# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
-# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
-# The syntax is:
-# "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
-# Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4243:
-static_host_map:
- "<INTERNAL_IP>": ["<EXTERNAL_IP>:<PORT>"]
-
-
-lighthouse:
- # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
- # you have configured to be lighthouses in your network
- am_lighthouse: false
- # serve_dns optionally starts a dns listener that responds to various queries and can even be
- # delegated to for resolution
- #serve_dns: false
- #dns:
- # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
- #host: 0.0.0.0
- #port: 53
- # interval is the number of seconds between updates from this node to a lighthouse.
- # during updates, a node sends information about its current IP addresses to each node.
- interval: 60
- # hosts is a list of lighthouse hosts this node should report to and query from
- # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
- # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
- hosts:
- - <INTERNAL_IP>
-
- # remote_allow_list allows you to control ip ranges that this node will
- # consider when handshaking to another node. By default, any remote IPs are
- # allowed. You can provide CIDRs here with `true` to allow and `false` to
- # deny. The most specific CIDR rule applies to each remote. If all rules are
- # "allow", the default will be "deny", and vice-versa. If both "allow" and
- # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
- # default.
- #remote_allow_list:
- # Example to block IPs from this subnet from being used for remote IPs.
- #"172.16.0.0/12": false
-
- # A more complicated example, allow public IPs but only private IPs from a specific subnet
- #"0.0.0.0/0": true
- #"10.0.0.0/8": false
- #"10.42.42.0/24": true
-
- # local_allow_list allows you to filter which local IP addresses we advertise
- # to the lighthouses. This uses the same logic as `remote_allow_list`, but
- # additionally, you can specify an `interfaces` map of regular expressions
- # to match against interface names. The regexp must match the entire name.
- # All interface rules must be either true or false (and the default will be
- # the inverse). CIDR rules are matched after interface name rules.
- # Default is all local IP addresses.
- #local_allow_list:
- # Example to block tun0 and all docker interfaces.
- #interfaces:
- #tun0: false
- #'docker.*': false
- # Example to only advertise this subnet to the lighthouse.
- #"10.0.0.0/8": true
-
-# Port Nebula will be listening on. The default here is 4243. For a lighthouse node, the port should be defined,
-# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
-listen:
- # To listen on both any ipv4 and ipv6 use "[::]"
- host: "[::]"
- port: 4243
- # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
- # default is 64, does not support reload
- #batch: 64
- # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
- # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/rmem_default)
- # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide
- # max, net.core.rmem_max and net.core.wmem_max
- #read_buffer: 10485760
- #write_buffer: 10485760
-
-# EXPERIMENTAL: This option is currently only supported on linux and may
-# change in future minor releases.
-#
-# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
-# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
-# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
-# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
-#routines: 1
-
-punchy:
- # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
- punch: true
-
- # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
- # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
- # Default is false
- #respond: true
-
- # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
- #delay: 1s
-
-# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
-# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
-cipher: chachapoly
-
-# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
-# path to a network adjacent nebula node.
-#local_range: "172.16.0.0/24"
-
-# sshd can expose informational and administrative functions via ssh this is a
-#sshd:
- # Toggles the feature
- #enabled: true
- # Host and port to listen on, port 22 is not allowed for your safety
- #listen: 127.0.0.1:2222
- # A file containing the ssh host private key to use
- # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
- #host_key: ./ssh_host_ed25519_key
- # A file containing a list of authorized public keys
- #authorized_users:
- #- user: steeeeve
- # keys can be an array of strings or single string
- #keys:
- #- "ssh public key string"
-
-# Configure the private interface. Note: addr is baked into the nebula certificate
-tun:
- # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
- disabled: false
- # Name of the device
- dev: nebula1
- # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
- drop_local_broadcast: false
- # Toggles forwarding of multicast packets
- drop_multicast: false
- # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
- tx_queue: 500
- # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
- mtu: 1300
- # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
- routes:
- #- mtu: 8800
- # route: 10.0.0.0/16
- # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
- # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
- # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
- unsafe_routes:
- #- route: 172.16.1.0/24
- # via: 192.168.100.99
- # mtu: 1300 #mtu will default to tun mtu if this option is not sepcified
-
-
-# TODO
-# Configure logging level
-logging:
- # panic, fatal, error, warning, info, or debug. Default is info
- level: info
- # json or text formats currently available. Default is text
- format: text
- # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
- #disable_timestamp: true
- # timestamp format is specified in Go time format, see:
- # https://golang.org/pkg/time/#pkg-constants
- # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # default when `format: text`:
- # when TTY attached: seconds since beginning of execution
- # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
- # As an example, to log as RFC3339 with millisecond precision, set to:
- #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
-
-#stats:
- #type: graphite
- #prefix: nebula
- #protocol: tcp
- #host: 127.0.0.1:9999
- #interval: 10s
-
- #type: prometheus
- #listen: 127.0.0.1:8080
- #path: /metrics
- #namespace: prometheusns
- #subsystem: nebula
- #interval: 10s
-
- # enables counter metrics for meta packets
- # e.g.: `messages.tx.handshake`
- # NOTE: `message.{tx,rx}.recv_error` is always emitted
- #message_metrics: false
-
- # enables detailed counter metrics for lighthouse packets
- # e.g.: `lighthouse.rx.HostQuery`
- #lighthouse_metrics: false
-
-# Handshake Manager Settings
-#handshakes:
- # Handshakes are sent to all known addresses at each interval with a linear backoff,
- # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
- # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
- #try_interval: 100ms
- #retries: 20
- # trigger_buffer is the size of the buffer channel for quickly sending handshakes
- # after receiving the response for lighthouse queries
- #trigger_buffer: 64
-
-
-# Nebula security group configuration
-firewall:
- conntrack:
- tcp_timeout: 12m
- udp_timeout: 3m
- default_timeout: 10m
- max_connections: 100000
-
- # The firewall is default deny. There is no way to write a deny rule.
- # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
- # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
- # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
- # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
- # proto: `any`, `tcp`, `udp`, or `icmp`
- # host: `any` or a literal hostname, ie `test-host`
- # group: `any` or a literal group name, ie `default-group`
- # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
- # cidr: a CIDR, `0.0.0.0/0` is any.
- # ca_name: An issuing CA name
- # ca_sha: An issuing CA shasum
-
- outbound:
- # Allow all outbound traffic from this node
- - port: any
- proto: any
- host: any
-
- inbound:
- - port: any
- proto: any
- host: any
diff --git a/charts/auth/templates/lighthouse-config.yaml b/charts/auth/templates/lighthouse-config.yaml
index 1b33be2..f4444e0 100644
--- a/charts/auth/templates/lighthouse-config.yaml
+++ b/charts/auth/templates/lighthouse-config.yaml
@@ -4,4 +4,250 @@
name: {{ .Values.ui.nebula.lighthouse.name }}
namespace: {{ .Release.Namespace }}
data:
-{{ (.Files.Glob "lighthouse.yaml").AsConfig | replace "<INTERNAL_IP>" .Values.ui.nebula.lighthouse.internalIP | replace "<EXTERNAL_IP>" .Values.ui.nebula.lighthouse.externalIP | replace "<PORT>" .Values.ui.nebula.lighthouse.port | indent 2 }}
+ lighthouse.yaml: |
+ # This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections
+ # Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels)
+
+ # PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
+ pki:
+ # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca'
+ ##ca: /etc/nebula/ca/ca.crt
+ ca: /etc/nebula/lighthouse/ca.crt
+ cert: /etc/nebula/lighthouse/host.crt
+ key: /etc/nebula/lighthouse/host.key
+ #blocklist is a list of certificate fingerprints that we will refuse to talk to
+ #blocklist:
+ # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
+
+ # The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
+ # A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
+ # The syntax is:
+ # "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
+ # Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4243:
+ static_host_map:
+     "{{ .Values.ui.nebula.lighthouse.internalIP }}": ["{{ .Values.ui.nebula.lighthouse.externalIP }}:{{ .Values.ui.nebula.lighthouse.port }}"]
+
+
+ lighthouse:
+ # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
+ # you have configured to be lighthouses in your network
+ am_lighthouse: false
+ # serve_dns optionally starts a dns listener that responds to various queries and can even be
+ # delegated to for resolution
+ #serve_dns: false
+ #dns:
+ # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP.
+ #host: 0.0.0.0
+ #port: 53
+ # interval is the number of seconds between updates from this node to a lighthouse.
+ # during updates, a node sends information about its current IP addresses to each node.
+ interval: 60
+ # hosts is a list of lighthouse hosts this node should report to and query from
+ # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
+ # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
+ hosts:
+ - {{ .Values.ui.nebula.lighthouse.internalIP }}
+
+ # remote_allow_list allows you to control ip ranges that this node will
+ # consider when handshaking to another node. By default, any remote IPs are
+ # allowed. You can provide CIDRs here with `true` to allow and `false` to
+ # deny. The most specific CIDR rule applies to each remote. If all rules are
+ # "allow", the default will be "deny", and vice-versa. If both "allow" and
+ # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
+ # default.
+ #remote_allow_list:
+ # Example to block IPs from this subnet from being used for remote IPs.
+ #"172.16.0.0/12": false
+
+ # A more complicated example, allow public IPs but only private IPs from a specific subnet
+ #"0.0.0.0/0": true
+ #"10.0.0.0/8": false
+ #"10.42.42.0/24": true
+
+ # local_allow_list allows you to filter which local IP addresses we advertise
+ # to the lighthouses. This uses the same logic as `remote_allow_list`, but
+ # additionally, you can specify an `interfaces` map of regular expressions
+ # to match against interface names. The regexp must match the entire name.
+ # All interface rules must be either true or false (and the default will be
+ # the inverse). CIDR rules are matched after interface name rules.
+ # Default is all local IP addresses.
+ #local_allow_list:
+ # Example to block tun0 and all docker interfaces.
+ #interfaces:
+ #tun0: false
+ #'docker.*': false
+ # Example to only advertise this subnet to the lighthouse.
+ #"10.0.0.0/8": true
+
+ # Port Nebula will be listening on. The default here is 4243. For a lighthouse node, the port should be defined,
+ # however using port 0 will dynamically assign a port and is recommended for roaming nodes.
+ listen:
+ # To listen on both any ipv4 and ipv6 use "[::]"
+ host: "[::]"
+ port: 4243
+ # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
+ # default is 64, does not support reload
+ #batch: 64
+ # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
+     # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/wmem_default)
+ # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide
+ # max, net.core.rmem_max and net.core.wmem_max
+ #read_buffer: 10485760
+ #write_buffer: 10485760
+
+ # EXPERIMENTAL: This option is currently only supported on linux and may
+ # change in future minor releases.
+ #
+ # Routines is the number of thread pairs to run that consume from the tun and UDP queues.
+ # Currently, this defaults to 1 which means we have 1 tun queue reader and 1
+ # UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
+ # device and SO_REUSEPORT on the UDP socket to allow multiple queues.
+ #routines: 1
+
+ punchy:
+ # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
+ punch: true
+
+ # respond means that a node you are trying to reach will connect back out to you if your hole punching fails
+ # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
+ # Default is false
+ #respond: true
+
+ # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
+ #delay: 1s
+
+ # Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
+ # IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
+ cipher: chachapoly
+
+ # Local range is used to define a hint about the local network range, which speeds up discovering the fastest
+ # path to a network adjacent nebula node.
+ #local_range: "172.16.0.0/24"
+
+   # sshd can expose informational and administrative functions via ssh.
+ #sshd:
+ # Toggles the feature
+ #enabled: true
+ # Host and port to listen on, port 22 is not allowed for your safety
+ #listen: 127.0.0.1:2222
+ # A file containing the ssh host private key to use
+ # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
+ #host_key: ./ssh_host_ed25519_key
+ # A file containing a list of authorized public keys
+ #authorized_users:
+ #- user: steeeeve
+ # keys can be an array of strings or single string
+ #keys:
+ #- "ssh public key string"
+
+ # Configure the private interface. Note: addr is baked into the nebula certificate
+ tun:
+ # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
+ disabled: false
+ # Name of the device
+ dev: nebula1
+ # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
+ drop_local_broadcast: false
+ # Toggles forwarding of multicast packets
+ drop_multicast: false
+ # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
+ tx_queue: 500
+ # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
+ mtu: 1300
+ # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
+ routes:
+ #- mtu: 8800
+ # route: 10.0.0.0/16
+ # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
+ # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
+ # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
+ unsafe_routes:
+ #- route: 172.16.1.0/24
+ # via: 192.168.100.99
+     #      mtu: 1300 #mtu will default to tun mtu if this option is not specified
+
+
+ # TODO
+ # Configure logging level
+ logging:
+ # panic, fatal, error, warning, info, or debug. Default is info
+ level: info
+ # json or text formats currently available. Default is text
+ format: text
+ # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
+ #disable_timestamp: true
+ # timestamp format is specified in Go time format, see:
+ # https://golang.org/pkg/time/#pkg-constants
+ # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
+ # default when `format: text`:
+ # when TTY attached: seconds since beginning of execution
+ # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
+ # As an example, to log as RFC3339 with millisecond precision, set to:
+ #timestamp_format: "2006-01-02T15:04:05.000Z07:00"
+
+ #stats:
+ #type: graphite
+ #prefix: nebula
+ #protocol: tcp
+ #host: 127.0.0.1:9999
+ #interval: 10s
+
+ #type: prometheus
+ #listen: 127.0.0.1:8080
+ #path: /metrics
+ #namespace: prometheusns
+ #subsystem: nebula
+ #interval: 10s
+
+ # enables counter metrics for meta packets
+ # e.g.: `messages.tx.handshake`
+ # NOTE: `message.{tx,rx}.recv_error` is always emitted
+ #message_metrics: false
+
+ # enables detailed counter metrics for lighthouse packets
+ # e.g.: `lighthouse.rx.HostQuery`
+ #lighthouse_metrics: false
+
+ # Handshake Manager Settings
+ #handshakes:
+ # Handshakes are sent to all known addresses at each interval with a linear backoff,
+ # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
+ # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
+ #try_interval: 100ms
+ #retries: 20
+ # trigger_buffer is the size of the buffer channel for quickly sending handshakes
+ # after receiving the response for lighthouse queries
+ #trigger_buffer: 64
+
+
+ # Nebula security group configuration
+ firewall:
+ conntrack:
+ tcp_timeout: 12m
+ udp_timeout: 3m
+ default_timeout: 10m
+ max_connections: 100000
+
+ # The firewall is default deny. There is no way to write a deny rule.
+ # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
+ # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
+ # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
+ # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
+ # proto: `any`, `tcp`, `udp`, or `icmp`
+ # host: `any` or a literal hostname, ie `test-host`
+ # group: `any` or a literal group name, ie `default-group`
+ # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
+ # cidr: a CIDR, `0.0.0.0/0` is any.
+ # ca_name: An issuing CA name
+ # ca_sha: An issuing CA shasum
+
+ outbound:
+ # Allow all outbound traffic from this node
+ - port: any
+ proto: any
+ host: any
+
+ inbound:
+ - port: any
+ proto: any
+ host: any
diff --git a/core/nebula/apis/nebula/v1/zz_generated.deepcopy.go b/core/nebula/apis/nebula/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..e47733d
--- /dev/null
+++ b/core/nebula/apis/nebula/v1/zz_generated.deepcopy.go
@@ -0,0 +1,197 @@
+// +build !ignore_autogenerated
+
+// gen
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaCA) DeepCopyInto(out *NebulaCA) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaCA.
+func (in *NebulaCA) DeepCopy() *NebulaCA {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaCA)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NebulaCA) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaCAList) DeepCopyInto(out *NebulaCAList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]NebulaCA, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaCAList.
+func (in *NebulaCAList) DeepCopy() *NebulaCAList {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaCAList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NebulaCAList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaCASpec) DeepCopyInto(out *NebulaCASpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaCASpec.
+func (in *NebulaCASpec) DeepCopy() *NebulaCASpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaCASpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaCAStatus) DeepCopyInto(out *NebulaCAStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaCAStatus.
+func (in *NebulaCAStatus) DeepCopy() *NebulaCAStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaCAStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaNode) DeepCopyInto(out *NebulaNode) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaNode.
+func (in *NebulaNode) DeepCopy() *NebulaNode {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaNode)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NebulaNode) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaNodeList) DeepCopyInto(out *NebulaNodeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]NebulaNode, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaNodeList.
+func (in *NebulaNodeList) DeepCopy() *NebulaNodeList {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaNodeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NebulaNodeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaNodeSpec) DeepCopyInto(out *NebulaNodeSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaNodeSpec.
+func (in *NebulaNodeSpec) DeepCopy() *NebulaNodeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaNodeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NebulaNodeStatus) DeepCopyInto(out *NebulaNodeStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NebulaNodeStatus.
+func (in *NebulaNodeStatus) DeepCopy() *NebulaNodeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NebulaNodeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/core/nebula/controller/Dockerfile.controller b/core/nebula/controller/Dockerfile.controller
new file mode 100644
index 0000000..fa0b2b7
--- /dev/null
+++ b/core/nebula/controller/Dockerfile.controller
@@ -0,0 +1,9 @@
+FROM alpine:latest
+
+COPY controller /usr/bin/nebula-controller
+RUN chmod +x /usr/bin/nebula-controller
+
+RUN wget https://github.com/slackhq/nebula/releases/download/v1.4.0/nebula-linux-arm64.tar.gz -O nebula.tar.gz
+RUN tar -xvf nebula.tar.gz
+RUN mv nebula-cert /usr/bin
+RUN chmod +x /usr/bin/nebula-cert
diff --git a/core/nebula/controller/Dockerfile.web b/core/nebula/controller/Dockerfile.web
new file mode 100644
index 0000000..a82f86b
--- /dev/null
+++ b/core/nebula/controller/Dockerfile.web
@@ -0,0 +1,4 @@
+FROM alpine:latest
+
+COPY web /usr/bin/nebula-web
+RUN chmod +x /usr/bin/nebula-web
diff --git a/core/nebula/controller/Makefile b/core/nebula/controller/Makefile
index 7fa4716..2104b79 100644
--- a/core/nebula/controller/Makefile
+++ b/core/nebula/controller/Makefile
@@ -5,24 +5,32 @@
rm -rf generated
./hack/generate.sh
-controller: clean
+controller: export CGO_ENABLED=0
+controller: export GO111MODULE=on
+controller: export GOOS=linux
+controller: export GOARCH=arm64
+controller:
go mod tidy
go mod vendor
go build -o controller main.go
-web: clean
+web: export CGO_ENABLED=0
+web: export GO111MODULE=on
+web: export GOOS=linux
+web: export GOARCH=arm64
+web:
go build -o web web.go
+image_controller: controller
+ docker build -f Dockerfile.controller --tag=giolekva/nebula-controller:latest . --platform=linux/arm64
-# image: clean build
-# docker build --tag=giolekva/rpuppy-arm .
+image_web: web
+ docker build -f Dockerfile.web --tag=giolekva/nebula-web:latest . --platform=linux/arm64
-# push: image
-# docker push giolekva/rpuppy-arm:latest
+push_controller: image_controller
+ docker push giolekva/nebula-controller:latest
+push_web: image_web
+ docker push giolekva/nebula-web:latest
-# push_arm64: export GOOS=linux
-# push_arm64: export GOARCH=arm64
-# push_arm64: export CGO_ENABLED=0
-# push_arm64: export GO111MODULE=on
-# push_arm64: push
+push_arm64: push_controller push_web
diff --git a/core/nebula/controller/apis/nebula/v1/types.go b/core/nebula/controller/apis/nebula/v1/types.go
index 0a295b7..c4e03f2 100644
--- a/core/nebula/controller/apis/nebula/v1/types.go
+++ b/core/nebula/controller/apis/nebula/v1/types.go
@@ -68,6 +68,7 @@
const (
NebulaNodeStateCreating NebulaNodeState = "Creating"
NebulaNodeStateReady NebulaNodeState = "Ready"
+ NebulaNodeStateError NebulaNodeState = "Error"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/core/nebula/controller/controllers/ca.go b/core/nebula/controller/controllers/ca.go
index f442db6..05b5e32 100644
--- a/core/nebula/controller/controllers/ca.go
+++ b/core/nebula/controller/controllers/ca.go
@@ -10,6 +10,7 @@
"time"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -144,12 +145,14 @@
c.workqueue.AddRateLimited(ref)
return fmt.Errorf("Error syncing '%s': %s, requeuing", ref.key, err.Error())
}
+ c.workqueue.Forget(o)
fmt.Printf("Successfully synced CA '%s'\n", ref.key)
} else if ref, ok := o.(nodeRef); ok {
if err := c.processNodeWithKey(ref.key); err != nil {
c.workqueue.AddRateLimited(ref)
return fmt.Errorf("Error syncing '%s': %s, requeuing", ref.key, err.Error())
}
+ c.workqueue.Forget(o)
fmt.Printf("Successfully synced Node '%s'\n", ref.key)
} else {
c.workqueue.Forget(o)
@@ -171,9 +174,13 @@
if err != nil {
return nil
}
- ca, err := c.getCA(namespace, name)
+ ca, err := c.caLister.NebulaCAs(namespace).Get(name)
if err != nil {
- panic(err)
+ if errors.IsNotFound(err) {
+ utilruntime.HandleError(fmt.Errorf("CA '%s' in work queue no longer exists", key))
+ return nil
+ }
+ return err
}
if ca.Status.State == nebulav1.NebulaCAStateReady {
fmt.Printf("%s CA is already in Ready state\n", ca.Name)
@@ -181,22 +188,22 @@
}
keyDir, err := generateCAKey(ca.Name, c.nebulaCert)
if err != nil {
- panic(err)
+ return err
}
defer os.RemoveAll(keyDir)
secret, err := createSecretFromDir(keyDir)
if err != nil {
- panic(err)
+ return err
}
secret.Immutable = &secretImmutable
secret.Name = ca.Spec.SecretName
_, err = c.kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
if err != nil {
- panic(err)
+ return err
}
err = c.updateCAStatus(ca, nebulav1.NebulaCAStateReady, "Generated credentials")
if err != nil {
- panic(err)
+ return err
}
return nil
}
@@ -206,55 +213,65 @@
if err != nil {
return nil
}
- node, err := c.getNode(namespace, name)
+ node, err := c.nodeLister.NebulaNodes(namespace).Get(name)
if err != nil {
- panic(err)
+ if errors.IsNotFound(err) {
+ utilruntime.HandleError(fmt.Errorf("NebulaNode '%s' in work queue no longer exists", key))
+ return nil
+ }
+ return err
}
if node.Status.State == nebulav1.NebulaNodeStateReady {
fmt.Printf("%s Node is already in Ready state\n", node.Name)
return nil
}
- ca, err := c.getCA(node.Spec.CANamespace, node.Spec.CAName)
- if ca == nil || ca.Status.State != nebulav1.NebulaCAStateReady {
+ ca, err := c.caLister.NebulaCAs(node.Spec.CANamespace).Get(node.Spec.CAName)
+ if err != nil {
+ return err
+ }
+ if ca.Status.State != nebulav1.NebulaCAStateReady {
return fmt.Errorf("Referenced CA %s is not ready yet.", node.Spec.CAName)
}
- caSecret, err := c.getSecret(ca.Namespace, ca.Spec.SecretName)
+ caSecret, err := c.secretLister.Secrets(ca.Namespace).Get(ca.Spec.SecretName)
if err != nil {
- panic(err)
+ if errors.IsNotFound(err) {
+ c.updateNodeStatus(node, nebulav1.NebulaNodeStateError, "Could not find CA secret")
+ }
+ return err
}
dir, err := extractSecret(caSecret)
if err != nil {
- panic(err)
+ return err
}
if node.Spec.PubKey == "" {
if err := generateNodeKey(node.Name, node.Spec.IPCidr, dir, c.nebulaCert); err != nil {
- panic(err)
+ return err
}
} else {
if err := generateNodeKeyFromPub(node.Name, node.Spec.IPCidr, node.Spec.PubKey, dir, c.nebulaCert); err != nil {
- panic(err)
+ return err
}
}
defer os.RemoveAll(dir)
if err := os.Remove(filepath.Join(dir, "ca.key")); err != nil {
- panic(err)
+ return err
}
if err := os.Remove(filepath.Join(dir, "ca.png")); err != nil {
- panic(err)
+ return err
}
secret, err := createSecretFromDir(dir)
if err != nil {
- panic(err)
+ return err
}
secret.Immutable = &secretImmutable
secret.Name = node.Spec.SecretName
_, err = c.kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
if err != nil {
- panic(err)
+ return err
}
err = c.updateNodeStatus(node, nebulav1.NebulaNodeStateReady, "Generated credentials")
if err != nil {
- panic(err)
+ return err
}
return nil
}
@@ -304,7 +321,7 @@
for name, data := range secret.Data {
if err := ioutil.WriteFile(filepath.Join(tmp, name), data, 0644); err != nil {
defer os.RemoveAll(tmp)
- return "", nil
+ return "", err
}
}
return tmp, nil
@@ -340,8 +357,8 @@
"-in-pub", hostPub,
"-out-crt", filepath.Join(dir, "host.crt"),
"-out-qr", filepath.Join(dir, "host.png"))
- if d, err := cmd.CombinedOutput(); err != nil {
- return fmt.Errorf(string(d))
+ if _, err := cmd.CombinedOutput(); err != nil {
+ return err
}
return nil
}
@@ -355,56 +372,8 @@
"-out-key", filepath.Join(dir, "host.key"),
"-out-crt", filepath.Join(dir, "host.crt"),
"-out-qr", filepath.Join(dir, "host.png"))
- if d, err := cmd.CombinedOutput(); err != nil {
- return fmt.Errorf(string(d))
+ if _, err := cmd.CombinedOutput(); err != nil {
+ return err
}
return nil
}
-
-func (c *NebulaController) getCA(namespace, name string) (*nebulav1.NebulaCA, error) {
- return c.caLister.NebulaCAs(namespace).Get(name)
- // s := labels.NewSelector()
- // r, err := labels.NewRequirement("metadata.namespace", selection.Equals, []string{namespace})
- // if err != nil {
- // panic(err)
- // }
- // r1, err := labels.NewRequirement("metadata.name", selection.Equals, []string{name})
- // if err != nil {
- // panic(err)
- // }
- // s.Add(*r, *r1)
- // ncas, err := c.caLister.List(s)
- // if err != nil {
- // panic(err)
- // }
- // if len(ncas) != 1 {
- // panic("err")
- // }
- // return ncas[0], nil
-}
-
-func (c *NebulaController) getNode(namespace, name string) (*nebulav1.NebulaNode, error) {
- return c.nodeLister.NebulaNodes(namespace).Get(name)
- // s := labels.NewSelector()
- // r, err := labels.NewRequirement("metadata.namespace", selection.Equals, []string{namespace})
- // if err != nil {
- // panic(err)
- // }
- // r1, err := labels.NewRequirement("metadata.name", selection.Equals, []string{name})
- // if err != nil {
- // panic(err)
- // }
- // s.Add(*r, *r1)
- // nodes, err := c.nodeLister.List(s)
- // if err != nil {
- // panic(err)
- // }
- // if len(nodes) != 1 {
- // panic("err")
- // }
- // return nodes[0], nil
-}
-
-func (c *NebulaController) getSecret(namespace, name string) (*corev1.Secret, error) {
- return c.secretLister.Secrets(namespace).Get(name)
-}
diff --git a/core/nebula/generated/clientset/versioned/clientset.go b/core/nebula/generated/clientset/versioned/clientset.go
new file mode 100644
index 0000000..32ba8fe
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/clientset.go
@@ -0,0 +1,83 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ "fmt"
+
+ lekvav1 "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned/typed/nebula/v1"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ LekvaV1() lekvav1.LekvaV1Interface
+}
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ lekvaV1 *lekvav1.LekvaV1Client
+}
+
+// LekvaV1 retrieves the LekvaV1Client
+func (c *Clientset) LekvaV1() lekvav1.LekvaV1Interface {
+ return c.lekvaV1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+ var cs Clientset
+ var err error
+ cs.lekvaV1, err = lekvav1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ var cs Clientset
+ cs.lekvaV1 = lekvav1.NewForConfigOrDie(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
+ return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.lekvaV1 = lekvav1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
diff --git a/core/nebula/generated/clientset/versioned/doc.go b/core/nebula/generated/clientset/versioned/doc.go
new file mode 100644
index 0000000..51f1905
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/doc.go
@@ -0,0 +1,6 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package versioned
diff --git a/core/nebula/generated/clientset/versioned/fake/clientset_generated.go b/core/nebula/generated/clientset/versioned/fake/clientset_generated.go
new file mode 100644
index 0000000..b0512f0
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/fake/clientset_generated.go
@@ -0,0 +1,71 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ clientset "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned"
+ lekvav1 "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned/typed/nebula/v1"
+ fakelekvav1 "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ fakediscovery "k8s.io/client-go/discovery/fake"
+ "k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+ o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+ for _, obj := range objects {
+ if err := o.Add(obj); err != nil {
+ panic(err)
+ }
+ }
+
+ cs := &Clientset{tracker: o}
+ cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
+ ns := action.GetNamespace()
+ watch, err := o.Watch(gvr, ns)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, watch, nil
+ })
+
+ return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+ testing.Fake
+ discovery *fakediscovery.FakeDiscovery
+ tracker testing.ObjectTracker
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ return c.discovery
+}
+
+func (c *Clientset) Tracker() testing.ObjectTracker {
+ return c.tracker
+}
+
+var (
+ _ clientset.Interface = &Clientset{}
+ _ testing.FakeClient = &Clientset{}
+)
+
+// LekvaV1 retrieves the LekvaV1Client
+func (c *Clientset) LekvaV1() lekvav1.LekvaV1Interface {
+ return &fakelekvav1.FakeLekvaV1{Fake: &c.Fake}
+}
diff --git a/core/nebula/generated/clientset/versioned/fake/doc.go b/core/nebula/generated/clientset/versioned/fake/doc.go
new file mode 100644
index 0000000..82879ea
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/fake/doc.go
@@ -0,0 +1,6 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake
diff --git a/core/nebula/generated/clientset/versioned/fake/register.go b/core/nebula/generated/clientset/versioned/fake/register.go
new file mode 100644
index 0000000..aa322cb
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/fake/register.go
@@ -0,0 +1,42 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ lekvav1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+
+var localSchemeBuilder = runtime.SchemeBuilder{
+ lekvav1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/core/nebula/generated/clientset/versioned/scheme/doc.go b/core/nebula/generated/clientset/versioned/scheme/doc.go
new file mode 100644
index 0000000..59bee07
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/scheme/doc.go
@@ -0,0 +1,6 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/core/nebula/generated/clientset/versioned/scheme/register.go b/core/nebula/generated/clientset/versioned/scheme/register.go
new file mode 100644
index 0000000..e27332c
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/scheme/register.go
@@ -0,0 +1,42 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ lekvav1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ lekvav1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/doc.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/doc.go
new file mode 100644
index 0000000..8ec8188
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/doc.go
@@ -0,0 +1,6 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/doc.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/doc.go
new file mode 100644
index 0000000..630d59b
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/doc.go
@@ -0,0 +1,6 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebula_client.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebula_client.go
new file mode 100644
index 0000000..e5f530e
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebula_client.go
@@ -0,0 +1,30 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned/typed/nebula/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeLekvaV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeLekvaV1) NebulaCAs(namespace string) v1.NebulaCAInterface {
+ return &FakeNebulaCAs{c, namespace}
+}
+
+func (c *FakeLekvaV1) NebulaNodes(namespace string) v1.NebulaNodeInterface {
+ return &FakeNebulaNodes{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeLekvaV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebulaca.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebulaca.go
new file mode 100644
index 0000000..22182e2
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebulaca.go
@@ -0,0 +1,128 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ nebulav1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNebulaCAs implements NebulaCAInterface
+type FakeNebulaCAs struct {
+ Fake *FakeLekvaV1
+ ns string
+}
+
+var nebulacasResource = schema.GroupVersionResource{Group: "lekva.me", Version: "v1", Resource: "nebulacas"}
+
+var nebulacasKind = schema.GroupVersionKind{Group: "lekva.me", Version: "v1", Kind: "NebulaCA"}
+
+// Get takes name of the nebulaCA, and returns the corresponding nebulaCA object, and an error if there is any.
+func (c *FakeNebulaCAs) Get(ctx context.Context, name string, options v1.GetOptions) (result *nebulav1.NebulaCA, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(nebulacasResource, c.ns, name), &nebulav1.NebulaCA{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaCA), err
+}
+
+// List takes label and field selectors, and returns the list of NebulaCAs that match those selectors.
+func (c *FakeNebulaCAs) List(ctx context.Context, opts v1.ListOptions) (result *nebulav1.NebulaCAList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(nebulacasResource, nebulacasKind, c.ns, opts), &nebulav1.NebulaCAList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &nebulav1.NebulaCAList{ListMeta: obj.(*nebulav1.NebulaCAList).ListMeta}
+ for _, item := range obj.(*nebulav1.NebulaCAList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested nebulaCAs.
+func (c *FakeNebulaCAs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(nebulacasResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a nebulaCA and creates it. Returns the server's representation of the nebulaCA, and an error, if there is any.
+func (c *FakeNebulaCAs) Create(ctx context.Context, nebulaCA *nebulav1.NebulaCA, opts v1.CreateOptions) (result *nebulav1.NebulaCA, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(nebulacasResource, c.ns, nebulaCA), &nebulav1.NebulaCA{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaCA), err
+}
+
+// Update takes the representation of a nebulaCA and updates it. Returns the server's representation of the nebulaCA, and an error, if there is any.
+func (c *FakeNebulaCAs) Update(ctx context.Context, nebulaCA *nebulav1.NebulaCA, opts v1.UpdateOptions) (result *nebulav1.NebulaCA, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(nebulacasResource, c.ns, nebulaCA), &nebulav1.NebulaCA{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaCA), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeNebulaCAs) UpdateStatus(ctx context.Context, nebulaCA *nebulav1.NebulaCA, opts v1.UpdateOptions) (*nebulav1.NebulaCA, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(nebulacasResource, "status", c.ns, nebulaCA), &nebulav1.NebulaCA{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaCA), err
+}
+
+// Delete takes name of the nebulaCA and deletes it. Returns an error if one occurs.
+func (c *FakeNebulaCAs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(nebulacasResource, c.ns, name), &nebulav1.NebulaCA{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNebulaCAs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(nebulacasResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &nebulav1.NebulaCAList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched nebulaCA.
+func (c *FakeNebulaCAs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nebulav1.NebulaCA, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(nebulacasResource, c.ns, name, pt, data, subresources...), &nebulav1.NebulaCA{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaCA), err
+}
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebulanode.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebulanode.go
new file mode 100644
index 0000000..cbd3957
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/fake/fake_nebulanode.go
@@ -0,0 +1,128 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ nebulav1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNebulaNodes implements NebulaNodeInterface
+type FakeNebulaNodes struct {
+ Fake *FakeLekvaV1
+ ns string
+}
+
+var nebulanodesResource = schema.GroupVersionResource{Group: "lekva.me", Version: "v1", Resource: "nebulanodes"}
+
+var nebulanodesKind = schema.GroupVersionKind{Group: "lekva.me", Version: "v1", Kind: "NebulaNode"}
+
+// Get takes name of the nebulaNode, and returns the corresponding nebulaNode object, and an error if there is any.
+func (c *FakeNebulaNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *nebulav1.NebulaNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(nebulanodesResource, c.ns, name), &nebulav1.NebulaNode{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaNode), err
+}
+
+// List takes label and field selectors, and returns the list of NebulaNodes that match those selectors.
+func (c *FakeNebulaNodes) List(ctx context.Context, opts v1.ListOptions) (result *nebulav1.NebulaNodeList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(nebulanodesResource, nebulanodesKind, c.ns, opts), &nebulav1.NebulaNodeList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &nebulav1.NebulaNodeList{ListMeta: obj.(*nebulav1.NebulaNodeList).ListMeta}
+ for _, item := range obj.(*nebulav1.NebulaNodeList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested nebulaNodes.
+func (c *FakeNebulaNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(nebulanodesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a nebulaNode and creates it. Returns the server's representation of the nebulaNode, and an error, if there is any.
+func (c *FakeNebulaNodes) Create(ctx context.Context, nebulaNode *nebulav1.NebulaNode, opts v1.CreateOptions) (result *nebulav1.NebulaNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(nebulanodesResource, c.ns, nebulaNode), &nebulav1.NebulaNode{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaNode), err
+}
+
+// Update takes the representation of a nebulaNode and updates it. Returns the server's representation of the nebulaNode, and an error, if there is any.
+func (c *FakeNebulaNodes) Update(ctx context.Context, nebulaNode *nebulav1.NebulaNode, opts v1.UpdateOptions) (result *nebulav1.NebulaNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(nebulanodesResource, c.ns, nebulaNode), &nebulav1.NebulaNode{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaNode), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeNebulaNodes) UpdateStatus(ctx context.Context, nebulaNode *nebulav1.NebulaNode, opts v1.UpdateOptions) (*nebulav1.NebulaNode, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(nebulanodesResource, "status", c.ns, nebulaNode), &nebulav1.NebulaNode{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaNode), err
+}
+
+// Delete takes name of the nebulaNode and deletes it. Returns an error if one occurs.
+func (c *FakeNebulaNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(nebulanodesResource, c.ns, name), &nebulav1.NebulaNode{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNebulaNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(nebulanodesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &nebulav1.NebulaNodeList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched nebulaNode.
+func (c *FakeNebulaNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nebulav1.NebulaNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(nebulanodesResource, c.ns, name, pt, data, subresources...), &nebulav1.NebulaNode{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*nebulav1.NebulaNode), err
+}
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/generated_expansion.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/generated_expansion.go
new file mode 100644
index 0000000..db9c80a
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/generated_expansion.go
@@ -0,0 +1,9 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type NebulaCAExpansion interface{}
+
+type NebulaNodeExpansion interface{}
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebula_client.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebula_client.go
new file mode 100644
index 0000000..261bb36
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebula_client.go
@@ -0,0 +1,80 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type LekvaV1Interface interface {
+ RESTClient() rest.Interface
+ NebulaCAsGetter
+ NebulaNodesGetter
+}
+
+// LekvaV1Client is used to interact with features provided by the lekva.me group.
+type LekvaV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *LekvaV1Client) NebulaCAs(namespace string) NebulaCAInterface {
+ return newNebulaCAs(c, namespace)
+}
+
+func (c *LekvaV1Client) NebulaNodes(namespace string) NebulaNodeInterface {
+ return newNebulaNodes(c, namespace)
+}
+
+// NewForConfig creates a new LekvaV1Client for the given config.
+func NewForConfig(c *rest.Config) (*LekvaV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &LekvaV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new LekvaV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *LekvaV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new LekvaV1Client for the given RESTClient.
+func New(c rest.Interface) *LekvaV1Client {
+ return &LekvaV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *LekvaV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebulaca.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebulaca.go
new file mode 100644
index 0000000..11aa718
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebulaca.go
@@ -0,0 +1,181 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ scheme "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// NebulaCAsGetter has a method to return a NebulaCAInterface.
+// A group's client should implement this interface.
+type NebulaCAsGetter interface {
+ NebulaCAs(namespace string) NebulaCAInterface
+}
+
+// NebulaCAInterface has methods to work with NebulaCA resources.
+type NebulaCAInterface interface {
+ Create(ctx context.Context, nebulaCA *v1.NebulaCA, opts metav1.CreateOptions) (*v1.NebulaCA, error)
+ Update(ctx context.Context, nebulaCA *v1.NebulaCA, opts metav1.UpdateOptions) (*v1.NebulaCA, error)
+ UpdateStatus(ctx context.Context, nebulaCA *v1.NebulaCA, opts metav1.UpdateOptions) (*v1.NebulaCA, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NebulaCA, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.NebulaCAList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NebulaCA, err error)
+ NebulaCAExpansion
+}
+
+// nebulaCAs implements NebulaCAInterface
+type nebulaCAs struct {
+ client rest.Interface
+ ns string
+}
+
+// newNebulaCAs returns a nebulaCAs client scoped to the given namespace.
+func newNebulaCAs(c *LekvaV1Client, namespace string) *nebulaCAs {
+ return &nebulaCAs{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the nebulaCA, and returns the corresponding nebulaCA object, and an error if there is any.
+func (c *nebulaCAs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NebulaCA, err error) {
+ result = &v1.NebulaCA{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of NebulaCAs that match those selectors.
+func (c *nebulaCAs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NebulaCAList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.NebulaCAList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested nebulaCAs.
+func (c *nebulaCAs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a nebulaCA and creates it. Returns the server's representation of the nebulaCA, and an error, if there is any.
+func (c *nebulaCAs) Create(ctx context.Context, nebulaCA *v1.NebulaCA, opts metav1.CreateOptions) (result *v1.NebulaCA, err error) {
+ result = &v1.NebulaCA{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(nebulaCA).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a nebulaCA and updates it. Returns the server's representation of the nebulaCA, and an error, if there is any.
+func (c *nebulaCAs) Update(ctx context.Context, nebulaCA *v1.NebulaCA, opts metav1.UpdateOptions) (result *v1.NebulaCA, err error) {
+ result = &v1.NebulaCA{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ Name(nebulaCA.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(nebulaCA).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *nebulaCAs) UpdateStatus(ctx context.Context, nebulaCA *v1.NebulaCA, opts metav1.UpdateOptions) (result *v1.NebulaCA, err error) {
+ result = &v1.NebulaCA{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ Name(nebulaCA.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(nebulaCA).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the nebulaCA and deletes it. Returns an error if one occurs.
+func (c *nebulaCAs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *nebulaCAs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("nebulacas").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched nebulaCA.
+func (c *nebulaCAs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NebulaCA, err error) {
+ result = &v1.NebulaCA{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("nebulacas").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebulanode.go b/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebulanode.go
new file mode 100644
index 0000000..4c38986
--- /dev/null
+++ b/core/nebula/generated/clientset/versioned/typed/nebula/v1/nebulanode.go
@@ -0,0 +1,181 @@
+// gen
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ scheme "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// NebulaNodesGetter has a method to return a NebulaNodeInterface.
+// A group's client should implement this interface.
+type NebulaNodesGetter interface {
+ NebulaNodes(namespace string) NebulaNodeInterface
+}
+
+// NebulaNodeInterface has methods to work with NebulaNode resources.
+type NebulaNodeInterface interface {
+ Create(ctx context.Context, nebulaNode *v1.NebulaNode, opts metav1.CreateOptions) (*v1.NebulaNode, error)
+ Update(ctx context.Context, nebulaNode *v1.NebulaNode, opts metav1.UpdateOptions) (*v1.NebulaNode, error)
+ UpdateStatus(ctx context.Context, nebulaNode *v1.NebulaNode, opts metav1.UpdateOptions) (*v1.NebulaNode, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NebulaNode, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.NebulaNodeList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NebulaNode, err error)
+ NebulaNodeExpansion
+}
+
+// nebulaNodes implements NebulaNodeInterface
+type nebulaNodes struct {
+ client rest.Interface
+ ns string
+}
+
+// newNebulaNodes returns a nebulaNodes client scoped to the given namespace.
+func newNebulaNodes(c *LekvaV1Client, namespace string) *nebulaNodes {
+ return &nebulaNodes{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the nebulaNode, and returns the corresponding nebulaNode object, and an error if there is any.
+func (c *nebulaNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NebulaNode, err error) {
+ result = &v1.NebulaNode{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of NebulaNodes that match those selectors.
+func (c *nebulaNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NebulaNodeList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.NebulaNodeList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested nebulaNodes.
+func (c *nebulaNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a nebulaNode and creates it. Returns the server's representation of the nebulaNode, and an error, if there is any.
+func (c *nebulaNodes) Create(ctx context.Context, nebulaNode *v1.NebulaNode, opts metav1.CreateOptions) (result *v1.NebulaNode, err error) {
+ result = &v1.NebulaNode{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(nebulaNode).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a nebulaNode and updates it. Returns the server's representation of the nebulaNode, and an error, if there is any.
+func (c *nebulaNodes) Update(ctx context.Context, nebulaNode *v1.NebulaNode, opts metav1.UpdateOptions) (result *v1.NebulaNode, err error) {
+ result = &v1.NebulaNode{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ Name(nebulaNode.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(nebulaNode).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *nebulaNodes) UpdateStatus(ctx context.Context, nebulaNode *v1.NebulaNode, opts metav1.UpdateOptions) (result *v1.NebulaNode, err error) {
+ result = &v1.NebulaNode{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ Name(nebulaNode.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(nebulaNode).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the nebulaNode and deletes it. Returns an error if one occurs.
+func (c *nebulaNodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *nebulaNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched nebulaNode.
+func (c *nebulaNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NebulaNode, err error) {
+ result = &v1.NebulaNode{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("nebulanodes").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/core/nebula/generated/informers/externalversions/factory.go b/core/nebula/generated/informers/externalversions/factory.go
new file mode 100644
index 0000000..eef24ab
--- /dev/null
+++ b/core/nebula/generated/informers/externalversions/factory.go
@@ -0,0 +1,166 @@
+// gen
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ reflect "reflect"
+ sync "sync"
+ time "time"
+
+ versioned "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned"
+ internalinterfaces "github.com/giolekva/pcloud/core/nebula/generated/informers/externalversions/internalinterfaces"
+ nebula "github.com/giolekva/pcloud/core/nebula/generated/informers/externalversions/nebula"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// SharedInformerOption defines the functional option type for SharedInformerFactory.
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
+
+type sharedInformerFactory struct {
+ client versioned.Interface
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ lock sync.Mutex
+ defaultResync time.Duration
+ customResync map[reflect.Type]time.Duration
+
+ informers map[reflect.Type]cache.SharedIndexInformer
+ // startedInformers is used for tracking which informers have been started.
+ // This allows Start() to be called multiple times safely.
+ startedInformers map[reflect.Type]bool
+}
+
+// WithCustomResyncConfig sets a custom resync period for the specified informer types.
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ for k, v := range resyncConfig {
+ factory.customResync[reflect.TypeOf(k)] = v
+ }
+ return factory
+ }
+}
+
+// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.tweakListOptions = tweakListOptions
+ return factory
+ }
+}
+
+// WithNamespace limits the SharedInformerFactory to the specified namespace.
+func WithNamespace(namespace string) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.namespace = namespace
+ return factory
+ }
+}
+
+// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync)
+}
+
+// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
+// Listers obtained via this SharedInformerFactory will be subject to the same filters
+// as specified here.
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
+func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
+}
+
+// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+ factory := &sharedInformerFactory{
+ client: client,
+ namespace: v1.NamespaceAll,
+ defaultResync: defaultResync,
+ informers: make(map[reflect.Type]cache.SharedIndexInformer),
+ startedInformers: make(map[reflect.Type]bool),
+ customResync: make(map[reflect.Type]time.Duration),
+ }
+
+ // Apply all options
+ for _, opt := range options {
+ factory = opt(factory)
+ }
+
+ return factory
+}
+
+// Start initializes all requested informers.
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ for informerType, informer := range f.informers {
+ if !f.startedInformers[informerType] {
+ go informer.Run(stopCh)
+ f.startedInformers[informerType] = true
+ }
+ }
+}
+
+// WaitForCacheSync waits until the caches of all started informers have synced.
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+ informers := func() map[reflect.Type]cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informers := map[reflect.Type]cache.SharedIndexInformer{}
+ for informerType, informer := range f.informers {
+ if f.startedInformers[informerType] {
+ informers[informerType] = informer
+ }
+ }
+ return informers
+ }()
+
+ res := map[reflect.Type]bool{}
+ for informType, informer := range informers {
+ res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+ }
+ return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj, constructing it with
+// newFunc on first use and returning the cached informer on subsequent calls.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informerType := reflect.TypeOf(obj)
+ informer, exists := f.informers[informerType]
+ if exists {
+ return informer
+ }
+
+ resyncPeriod, exists := f.customResync[informerType]
+ if !exists {
+ resyncPeriod = f.defaultResync
+ }
+
+ informer = newFunc(f.client, resyncPeriod)
+ f.informers[informerType] = informer
+
+ return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+ Lekva() nebula.Interface
+}
+
+func (f *sharedInformerFactory) Lekva() nebula.Interface {
+ return nebula.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/core/nebula/generated/informers/externalversions/generic.go b/core/nebula/generated/informers/externalversions/generic.go
new file mode 100644
index 0000000..9df56e4
--- /dev/null
+++ b/core/nebula/generated/informers/externalversions/generic.go
@@ -0,0 +1,50 @@
+// gen
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ "fmt"
+
+ v1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer which will locate and
+// delegate to other sharedInformers based on type.
+type GenericInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+ informer cache.SharedIndexInformer
+ resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+ return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+ return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+ switch resource {
+ // Group=lekva.me, Version=v1
+ case v1.SchemeGroupVersion.WithResource("nebulacas"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Lekva().V1().NebulaCAs().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("nebulanodes"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Lekva().V1().NebulaNodes().Informer()}, nil
+
+ }
+
+ return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/core/nebula/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/core/nebula/generated/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000..602d1ad
--- /dev/null
+++ b/core/nebula/generated/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,26 @@
+// gen
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+ time "time"
+
+ versioned "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
+type SharedInformerFactory interface {
+ Start(stopCh <-chan struct{})
+ InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/core/nebula/generated/informers/externalversions/nebula/interface.go b/core/nebula/generated/informers/externalversions/nebula/interface.go
new file mode 100644
index 0000000..6061070
--- /dev/null
+++ b/core/nebula/generated/informers/externalversions/nebula/interface.go
@@ -0,0 +1,32 @@
+// gen
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package nebula
+
+import (
+ internalinterfaces "github.com/giolekva/pcloud/core/nebula/generated/informers/externalversions/internalinterfaces"
+ v1 "github.com/giolekva/pcloud/core/nebula/generated/informers/externalversions/nebula/v1"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/core/nebula/generated/informers/externalversions/nebula/v1/interface.go b/core/nebula/generated/informers/externalversions/nebula/v1/interface.go
new file mode 100644
index 0000000..eb7fc27
--- /dev/null
+++ b/core/nebula/generated/informers/externalversions/nebula/v1/interface.go
@@ -0,0 +1,38 @@
+// gen
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "github.com/giolekva/pcloud/core/nebula/generated/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // NebulaCAs returns a NebulaCAInformer.
+ NebulaCAs() NebulaCAInformer
+ // NebulaNodes returns a NebulaNodeInformer.
+ NebulaNodes() NebulaNodeInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// NebulaCAs returns a NebulaCAInformer.
+func (v *version) NebulaCAs() NebulaCAInformer {
+ return &nebulaCAInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// NebulaNodes returns a NebulaNodeInformer.
+func (v *version) NebulaNodes() NebulaNodeInformer {
+ return &nebulaNodeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/core/nebula/generated/informers/externalversions/nebula/v1/nebulaca.go b/core/nebula/generated/informers/externalversions/nebula/v1/nebulaca.go
new file mode 100644
index 0000000..eda6a7a
--- /dev/null
+++ b/core/nebula/generated/informers/externalversions/nebula/v1/nebulaca.go
@@ -0,0 +1,76 @@
+// gen
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ nebulav1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ versioned "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned"
+ internalinterfaces "github.com/giolekva/pcloud/core/nebula/generated/informers/externalversions/internalinterfaces"
+ v1 "github.com/giolekva/pcloud/core/nebula/generated/listers/nebula/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// NebulaCAInformer provides access to a shared informer and lister for
+// NebulaCAs.
+type NebulaCAInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.NebulaCALister
+}
+
+type nebulaCAInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewNebulaCAInformer constructs a new informer for NebulaCA type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewNebulaCAInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredNebulaCAInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredNebulaCAInformer constructs a new informer for NebulaCA type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredNebulaCAInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.LekvaV1().NebulaCAs(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.LekvaV1().NebulaCAs(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &nebulav1.NebulaCA{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *nebulaCAInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredNebulaCAInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *nebulaCAInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&nebulav1.NebulaCA{}, f.defaultInformer)
+}
+
+func (f *nebulaCAInformer) Lister() v1.NebulaCALister {
+ return v1.NewNebulaCALister(f.Informer().GetIndexer())
+}
diff --git a/core/nebula/generated/informers/externalversions/nebula/v1/nebulanode.go b/core/nebula/generated/informers/externalversions/nebula/v1/nebulanode.go
new file mode 100644
index 0000000..b3d3c65
--- /dev/null
+++ b/core/nebula/generated/informers/externalversions/nebula/v1/nebulanode.go
@@ -0,0 +1,76 @@
+// gen
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ nebulav1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ versioned "github.com/giolekva/pcloud/core/nebula/generated/clientset/versioned"
+ internalinterfaces "github.com/giolekva/pcloud/core/nebula/generated/informers/externalversions/internalinterfaces"
+ v1 "github.com/giolekva/pcloud/core/nebula/generated/listers/nebula/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// NebulaNodeInformer provides access to a shared informer and lister for
+// NebulaNodes.
+type NebulaNodeInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.NebulaNodeLister
+}
+
+type nebulaNodeInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewNebulaNodeInformer constructs a new informer for NebulaNode type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewNebulaNodeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredNebulaNodeInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredNebulaNodeInformer constructs a new informer for NebulaNode type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredNebulaNodeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.LekvaV1().NebulaNodes(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.LekvaV1().NebulaNodes(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &nebulav1.NebulaNode{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *nebulaNodeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredNebulaNodeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *nebulaNodeInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&nebulav1.NebulaNode{}, f.defaultInformer)
+}
+
+func (f *nebulaNodeInformer) Lister() v1.NebulaNodeLister {
+ return v1.NewNebulaNodeLister(f.Informer().GetIndexer())
+}
diff --git a/core/nebula/generated/listers/nebula/v1/expansion_generated.go b/core/nebula/generated/listers/nebula/v1/expansion_generated.go
new file mode 100644
index 0000000..8b4d0e8
--- /dev/null
+++ b/core/nebula/generated/listers/nebula/v1/expansion_generated.go
@@ -0,0 +1,21 @@
+// gen
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// NebulaCAListerExpansion allows custom methods to be added to
+// NebulaCALister.
+type NebulaCAListerExpansion interface{}
+
+// NebulaCANamespaceListerExpansion allows custom methods to be added to
+// NebulaCANamespaceLister.
+type NebulaCANamespaceListerExpansion interface{}
+
+// NebulaNodeListerExpansion allows custom methods to be added to
+// NebulaNodeLister.
+type NebulaNodeListerExpansion interface{}
+
+// NebulaNodeNamespaceListerExpansion allows custom methods to be added to
+// NebulaNodeNamespaceLister.
+type NebulaNodeNamespaceListerExpansion interface{}
diff --git a/core/nebula/generated/listers/nebula/v1/nebulaca.go b/core/nebula/generated/listers/nebula/v1/nebulaca.go
new file mode 100644
index 0000000..a261c9d
--- /dev/null
+++ b/core/nebula/generated/listers/nebula/v1/nebulaca.go
@@ -0,0 +1,85 @@
+// gen
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// NebulaCALister helps list NebulaCAs.
+// All objects returned here must be treated as read-only.
+type NebulaCALister interface {
+ // List lists all NebulaCAs in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.NebulaCA, err error)
+ // NebulaCAs returns an object that can list and get NebulaCAs.
+ NebulaCAs(namespace string) NebulaCANamespaceLister
+ NebulaCAListerExpansion
+}
+
+// nebulaCALister implements the NebulaCALister interface.
+type nebulaCALister struct {
+ indexer cache.Indexer
+}
+
+// NewNebulaCALister returns a new NebulaCALister.
+func NewNebulaCALister(indexer cache.Indexer) NebulaCALister {
+ return &nebulaCALister{indexer: indexer}
+}
+
+// List lists all NebulaCAs in the indexer.
+func (s *nebulaCALister) List(selector labels.Selector) (ret []*v1.NebulaCA, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.NebulaCA))
+ })
+ return ret, err
+}
+
+// NebulaCAs returns an object that can list and get NebulaCAs.
+func (s *nebulaCALister) NebulaCAs(namespace string) NebulaCANamespaceLister {
+ return nebulaCANamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// NebulaCANamespaceLister helps list and get NebulaCAs.
+// All objects returned here must be treated as read-only.
+type NebulaCANamespaceLister interface {
+ // List lists all NebulaCAs in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.NebulaCA, err error)
+ // Get retrieves the NebulaCA from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.NebulaCA, error)
+ NebulaCANamespaceListerExpansion
+}
+
+// nebulaCANamespaceLister implements the NebulaCANamespaceLister
+// interface.
+type nebulaCANamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all NebulaCAs in the indexer for a given namespace.
+func (s nebulaCANamespaceLister) List(selector labels.Selector) (ret []*v1.NebulaCA, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.NebulaCA))
+ })
+ return ret, err
+}
+
+// Get retrieves the NebulaCA from the indexer for a given namespace and name.
+func (s nebulaCANamespaceLister) Get(name string) (*v1.NebulaCA, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("nebulaca"), name)
+ }
+ return obj.(*v1.NebulaCA), nil
+}
diff --git a/core/nebula/generated/listers/nebula/v1/nebulanode.go b/core/nebula/generated/listers/nebula/v1/nebulanode.go
new file mode 100644
index 0000000..f861e18
--- /dev/null
+++ b/core/nebula/generated/listers/nebula/v1/nebulanode.go
@@ -0,0 +1,85 @@
+// gen
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/giolekva/pcloud/core/nebula/apis/nebula/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// NebulaNodeLister helps list NebulaNodes.
+// All objects returned here must be treated as read-only.
+type NebulaNodeLister interface {
+ // List lists all NebulaNodes in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.NebulaNode, err error)
+ // NebulaNodes returns an object that can list and get NebulaNodes.
+ NebulaNodes(namespace string) NebulaNodeNamespaceLister
+ NebulaNodeListerExpansion
+}
+
+// nebulaNodeLister implements the NebulaNodeLister interface.
+type nebulaNodeLister struct {
+ indexer cache.Indexer
+}
+
+// NewNebulaNodeLister returns a new NebulaNodeLister.
+func NewNebulaNodeLister(indexer cache.Indexer) NebulaNodeLister {
+ return &nebulaNodeLister{indexer: indexer}
+}
+
+// List lists all NebulaNodes in the indexer.
+func (s *nebulaNodeLister) List(selector labels.Selector) (ret []*v1.NebulaNode, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.NebulaNode))
+ })
+ return ret, err
+}
+
+// NebulaNodes returns an object that can list and get NebulaNodes.
+func (s *nebulaNodeLister) NebulaNodes(namespace string) NebulaNodeNamespaceLister {
+ return nebulaNodeNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// NebulaNodeNamespaceLister helps list and get NebulaNodes.
+// All objects returned here must be treated as read-only.
+type NebulaNodeNamespaceLister interface {
+ // List lists all NebulaNodes in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.NebulaNode, err error)
+ // Get retrieves the NebulaNode from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.NebulaNode, error)
+ NebulaNodeNamespaceListerExpansion
+}
+
+// nebulaNodeNamespaceLister implements the NebulaNodeNamespaceLister
+// interface.
+type nebulaNodeNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all NebulaNodes in the indexer for a given namespace.
+func (s nebulaNodeNamespaceLister) List(selector labels.Selector) (ret []*v1.NebulaNode, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.NebulaNode))
+ })
+ return ret, err
+}
+
+// Get retrieves the NebulaNode from the indexer for a given namespace and name.
+func (s nebulaNodeNamespaceLister) Get(name string) (*v1.NebulaNode, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("nebulanode"), name)
+ }
+ return obj.(*v1.NebulaNode), nil
+}
diff --git a/helmfile/base/helmfile.yaml b/helmfile/base/helmfile.yaml
index c8b7cbe..276096e 100644
--- a/helmfile/base/helmfile.yaml
+++ b/helmfile/base/helmfile.yaml
@@ -199,6 +199,20 @@
- adminService:
name: hydra # IGNORED
port: 80
+- name: nebula-controller
+ chart: ../../charts/nebula
+ namespace: {{ .Values.name }}-nebula-controller
+ values:
+ - controller:
+ image:
+ repository: giolekva/nebula-controller
+ tag: latest
+ pullPolicy: Always
+ - manage:
+ image:
+ repository: giolekva/nebula-web
+ tag: latest
+ pullPolicy: Always
environments:
prod:
diff --git a/helmfile/users/helmfile.yaml b/helmfile/users/helmfile.yaml
index df53071..567d3e0 100644
--- a/helmfile/users/helmfile.yaml
+++ b/helmfile/users/helmfile.yaml
@@ -342,7 +342,6 @@
- hosts:
- hydra.{{ .Values.domain }}
secretName: cert-hydra.{{ .Values.domain }}
- # secretName: cert-wildcard.{{ .Values.domain }}
secret:
enabled: true
maester: