expose:
  # Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
  # and fill the information in the corresponding section
  type: ingress
  tls:
    # Enable TLS or not.
    # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
    # Note: if the "expose.type" is "ingress" and TLS is disabled,
    # the port must be included in the command when pulling/pushing images.
    # Refer to https://github.com/goharbor/harbor/issues/5291 for details.
    enabled: true
    # The source of the tls certificate. Set as "auto", "secret"
    # or "none" and fill the information in the corresponding section
    # 1) auto: generate the tls certificate automatically
    # 2) secret: read the tls certificate from the specified secret.
    # The tls certificate can be generated manually or by cert manager
    # 3) none: configure no tls certificate for the ingress. If the default
    # tls certificate is configured in the ingress controller, choose this option
    certSource: auto
    auto:
      # The common name used to generate the certificate; it's necessary
      # when the type isn't "ingress"
      commonName: ""
    secret:
      # The name of the secret which contains keys named:
      # "tls.crt" - the certificate
      # "tls.key" - the private key
      secretName: ""
  ingress:
    hosts:
      core: harbor.t46.lekva.me
    # set to the type of ingress controller if it has specific requirements.
    # leave as `default` for most ingress controllers.
    # set to `gce` if using the GCE ingress controller
    # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
    # set to `alb` if using the ALB ingress controller
    # set to `f5-bigip` if using the F5 BIG-IP ingress controller
    controller: default
    ## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress
    kubeVersionOverride: ""
    className: dodo-ingress-public
    annotations:
      # note: different ingress controllers may require a different ssl-redirect annotation.
      # for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
      acme.cert-manager.io/http01-edit-in-place: "true"
      cert-manager.io/cluster-issuer: vhrb-public
    harbor:
      # harbor ingress-specific annotations
      annotations: {}
      # harbor ingress-specific labels
      labels: {}
  clusterIP:
    # The name of the ClusterIP service
    name: harbor
    # The ip address of the ClusterIP service (leave empty for acquiring dynamic ip)
    staticClusterIP: ""
    # Annotations on the ClusterIP service
    annotations: {}
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
  nodePort:
    # The name of the NodePort service
    name: harbor
    ports:
      http:
        # The service port Harbor listens on when serving HTTP
        port: 80
        # The node port Harbor listens on when serving HTTP
        nodePort: 30002
      https:
        # The service port Harbor listens on when serving HTTPS
        port: 443
        # The node port Harbor listens on when serving HTTPS
        nodePort: 30003
  loadBalancer:
    # The name of the LoadBalancer service
    name: harbor
    # Set the IP if the LoadBalancer supports assigning IP
    IP: ""
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
    annotations: {}
    sourceRanges: []
| 94 | |
# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands showed on portal
# 2) populate the token service URL returned to docker client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of the k8s node
#
# If Harbor is deployed behind a proxy, set it as the URL of the proxy
externalURL: https://harbor.t46.lekva.me
| 109 | |
# The internal TLS used for secure communication between harbor components. In order to enable https,
# the tls cert files for each component need to be provided in advance.
internalTLS:
  # If internal TLS enabled
  enabled: false
  # enable strong ssl ciphers (default: false)
  strong_ssl_ciphers: false
  # There are three ways to provide tls
  # 1) "auto" will generate cert automatically
  # 2) "manual" need provide cert file manually in following value
  # 3) "secret" internal certificates from secret
  certSource: "auto"
  # The content of trust ca, only available when `certSource` is "manual"
  trustCa: ""
  # core related cert configuration
  core:
    # secret name for core's tls certs
    secretName: ""
    # Content of core's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of core's TLS key file, only available when `certSource` is "manual"
    key: ""
  # jobservice related cert configuration
  jobservice:
    # secret name for jobservice's tls certs
    secretName: ""
    # Content of jobservice's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of jobservice's TLS key file, only available when `certSource` is "manual"
    key: ""
  # registry related cert configuration
  registry:
    # secret name for registry's tls certs
    secretName: ""
    # Content of registry's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of registry's TLS key file, only available when `certSource` is "manual"
    key: ""
  # portal related cert configuration
  portal:
    # secret name for portal's tls certs
    secretName: ""
    # Content of portal's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of portal's TLS key file, only available when `certSource` is "manual"
    key: ""
  # trivy related cert configuration
  trivy:
    # secret name for trivy's tls certs
    secretName: ""
    # Content of trivy's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of trivy's TLS key file, only available when `certSource` is "manual"
    key: ""
| 164 | |
ipFamily:
  # set ipv6.enabled to true if ipv6 is enabled in the cluster; currently it affects the nginx related component
  ipv6:
    enabled: false
  # set ipv4.enabled to true if ipv4 is enabled in the cluster; currently it affects the nginx related component
  ipv4:
    enabled: true
| 172 | |
# The persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
  enabled: true
  # Setting it to "keep" to avoid removing PVCs during a helm delete
  # operation. Leaving it empty will delete PVCs after the chart deleted
  # (this does not apply for PVCs that are created for internal database
  # and redis components, i.e. they are never deleted automatically)
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      # Use the existing PVC which must be created manually before bound,
      # and specify the "subPath" if the PVC is shared with other components
      existingClaim: ""
      # Specify the "storageClass" used to provision the volume. Or the default
      # StorageClass will be used (the default).
      # Set it to "-" to disable dynamic provisioning
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
      annotations: {}
    jobservice:
      jobLog:
        existingClaim: ""
        storageClass: ""
        subPath: ""
        accessMode: ReadWriteOnce
        size: 1Gi
        annotations: {}
    # If external database is used, the following settings for database will
    # be ignored
    database:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
      annotations: {}
    # If external Redis is used, the following settings for Redis will
    # be ignored
    redis:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
      annotations: {}
    trivy:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
      annotations: {}
  # Define which storage backend is used for registry to store
  # images and charts. Refer to
  # https://github.com/distribution/distribution/blob/main/docs/configuration.md#storage
  # for the detail.
  imageChartStorage:
    # Specify whether to disable `redirect` for images and chart storage, for
    # backends which do not support it (such as using minio for `s3` storage type), please disable
    # it. To disable redirects, simply set `disableredirect` to `true` instead.
    # Refer to
    # https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect
    # for the detail.
    disableredirect: false
    # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate.
    # The secret must contain keys named "ca.crt" which will be injected into the trust store
    # of registry's containers.
    # caBundleSecretName:

    # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift",
    # "oss" and fill the information needed in the corresponding section. The type
    # must be "filesystem" if you want to use persistent volumes for registry
    type: filesystem
    filesystem:
      rootdirectory: /storage
      #maxthreads: 100
| 257 | |
imagePullPolicy: IfNotPresent

# Use this set to assign a list of default pullSecrets
imagePullSecrets:
# - name: docker-registry-secret
# - name: internal-registry-secret

# The update strategy for deployments with persistent volumes (jobservice, registry): "RollingUpdate" or "Recreate"
# Set it as "Recreate" when "RWM" for volumes isn't supported
updateStrategy:
  type: RollingUpdate

# debug, info, warning, error or fatal
logLevel: info
| 272 | |
# The initial password of Harbor admin. Change it from portal after launching Harbor
# or give an existing secret for it
# key in secret is given via (default to HARBOR_ADMIN_PASSWORD)
# existingSecretAdminPassword:
existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD
# NOTE(review): this is the well-known chart default committed to VCS;
# prefer referencing a pre-created secret via existingSecretAdminPassword.
harborAdminPassword: "Harbor12345"

# The name of the secret which contains key named "ca.crt". Setting this enables the
# download link on portal to download the CA certificate when the certificate isn't
# generated automatically
caSecretName: ""

# The secret key used for encryption. Must be a string of 16 chars.
secretKey: "not-a-secure-key"
# If using existingSecretSecretKey, the key must be secretKey
existingSecretSecretKey: ""
| 289 | |
# The proxy settings for updating trivy vulnerabilities from the Internet and replicating
# artifacts from/to the registries that cannot be reached directly
proxy:
  httpProxy:
  httpsProxy:
  noProxy: 127.0.0.1,localhost,.local,.internal
  components:
    - core
    - jobservice
    - trivy
| 300 | |
# Run the migration job via helm hook
enableMigrateHelmHook: false

# The custom ca bundle secret; the secret must contain a key named "ca.crt"
# which will be injected into the trust store for core, jobservice, registry, trivy components
# caBundleSecretName: ""

## UAA Authentication Options
# If you're using UAA for authentication behind a self-signed
# certificate you will need to provide the CA Cert.
# Set uaaSecretName below to provide a pre-created secret that
# contains a base64 encoded CA Certificate named `ca.crt`.
# uaaSecretName:
| 314 | |
# If the service is exposed via "ingress", the Nginx component will not be used
nginx:
  image:
    repository: goharbor/nginx-photon
    tag: v2.10.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:
| 346 | |
portal:
  image:
    repository: goharbor/harbor-portal
    tag: v2.10.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## Additional service annotations
  serviceAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:
| 379 | |
core:
  image:
    repository: goharbor/harbor-core
    tag: v2.10.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  ## Startup probe values
  startupProbe:
    enabled: true
    initialDelaySeconds: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## Additional service annotations
  serviceAnnotations: {}
  ## User settings configuration json string
  configureUserSettings:
  # The provider for updating project quota (usage); there are 2 options, redis or db.
  # By default it is implemented by db, but you can configure it to redis which
  # can improve the performance of high concurrent pushing to the same project,
  # and reduce the database connection spikes and occupancy.
  # Using redis will bring some delay to the quota usage update for display, so only
  # switch the provider to redis if you ran into database connection spikes around
  # the scenario of high concurrent pushing to the same project; there is no improvement for other scenes.
  quotaUpdateProvider: db # Or redis
  # Secret is used when core server communicates with other components.
  # If a secret key is not specified, Helm will generate one. Alternatively set existingSecret to use an existing secret
  # Must be a string of 16 chars.
  secret: ""
  # Fill in the name of a kubernetes secret if you want to use your own
  # If using existingSecret, the key must be secret
  existingSecret: ""
  # Fill in the name of a kubernetes secret if you want to use your own
  # TLS certificate and private key for token encryption/decryption.
  # The secret must contain keys named:
  # "tls.key" - the private key
  # "tls.crt" - the certificate
  secretName: ""
  # If not specifying a preexisting secret, a secret can be created from tokenKey and tokenCert and used instead.
  # If none of secretName, tokenKey, and tokenCert are specified, an ephemeral key and certificate will be autogenerated.
  # tokenKey and tokenCert must BOTH be set or BOTH unset.
  # The tokenKey value is formatted as a multiline string containing a PEM-encoded RSA key, indented one more than tokenKey on the following line.
  # NOTE(review): both block scalars below are empty (parse as "") — presumably intentional placeholders; confirm.
  tokenKey: |
  # If tokenKey is set, the value of tokenCert must be set as a PEM-encoded certificate signed by tokenKey, and supplied as a multiline string, indented one more than tokenCert on the following line.
  tokenCert: |
  # The XSRF key. Will be generated automatically if it isn't specified
  xsrfKey: ""
  # If using existingSecret, the key is defined by core.existingXsrfSecretKey
  existingXsrfSecret: ""
  # If using existingSecret, the key
  existingXsrfSecretKey: CSRF_KEY
  ## The priority class to run the pod as
  priorityClassName:
  # The time duration for async update of artifact pull_time and repository
  # pull_count, the unit is second. Will be 10 seconds if it isn't set.
  # eg. artifactPullAsyncFlushDuration: 10
  artifactPullAsyncFlushDuration:
  gdpr:
    deleteUser: false
| 458 | |
jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v2.10.0
  replicas: 1
  revisionHistoryLimit: 10
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  maxJobWorkers: 10
  # The logger for jobs: "file", "database" or "stdout"
  jobLoggers:
    - file
    # - database
    # - stdout
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
  loggerSweeperDuration: 14 # days
  notification:
    webhook_job_max_retry: 3
    webhook_job_http_client_timeout: 3 # in seconds
  reaper:
    # the max time to wait for a task to finish; if unfinished after max_update_hours, the task will be marked as error, but the task will continue to run; default value is 24
    max_update_hours: 24
    # the max time for an execution in running state without a new task created
    max_dangling_hours: 168

  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  # (empty list for consistency with the other components)
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  # Secret is used when job service communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # Use an existing secret resource
  existingSecret: ""
  # Key within the existing secret for the job service secret
  existingSecretKey: JOBSERVICE_SECRET
  ## The priority class to run the pod as
  priorityClassName:
| 514 | |
registry:
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.10.0
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    extraEnvVars: []
  controller:
    image:
      repository: goharbor/harbor-registryctl
      # NOTE(review): "dev" tag while sibling components pin v2.10.0 — confirm this is intentional
      tag: dev

    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    extraEnvVars: []
  replicas: 1
  revisionHistoryLimit: 10
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:
  # Secret is used to secure the upload state from client
  # and registry storage backend.
  # See: https://github.com/distribution/distribution/blob/main/docs/configuration.md#http
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # Use an existing secret resource
  existingSecret: ""
  # Key within the existing secret for the registry service secret
  existingSecretKey: REGISTRY_HTTP_SECRET
  # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
  relativeurls: false
  credentials:
    username: "harbor_registry_user"
    password: "harbor_registry_password"
    # If using existingSecret, the key must be REGISTRY_PASSWD and REGISTRY_HTPASSWD
    existingSecret: ""
    # Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt.
    # htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
    htpasswdString: ""
  middleware:
    enabled: false
    type: cloudFront
    cloudFront:
      baseurl: example.cloudfront.net
      keypairid: KEYPAIRID
      duration: 3000s
      ipfilteredby: none
      # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
      # that allows access to CloudFront
      privateKeySecret: "my-secret"
  # enable purge of _upload directories
  upload_purging:
    enabled: true
    # remove files in _upload directories which exist for a period of time; default is one week.
    age: 168h
    # the interval of the purge operations
    interval: 24h
    dryrun: false
| 595 | |
trivy:
  # enabled the flag to enable Trivy scanner
  enabled: true
  image:
    # repository the repository for Trivy adapter image
    repository: goharbor/trivy-adapter-photon
    # tag the tag for Trivy adapter image
    # NOTE(review): "dev" tag while sibling components pin v2.10.0 — confirm this is intentional
    tag: dev
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # replicas the number of Pod replicas
  replicas: 1
  # debugMode the flag to enable Trivy debug mode with more verbose scanning log
  debugMode: false
  # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
  vulnType: "os,library"
  # severity a comma-separated list of severities to be checked
  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
  # ignoreUnfixed the flag to display only fixed vulnerabilities
  ignoreUnfixed: false
  # insecure the flag to skip verifying registry certificate
  insecure: false
  # gitHubToken the GitHub access token to download Trivy DB
  #
  # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
  # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
  # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
  # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
  # Currently, the database is updated every 12 hours and published as a new release to GitHub.
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  gitHubToken: ""
  # skipUpdate the flag to disable Trivy DB downloads from GitHub
  #
  # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
  # `/home/scanner/.cache/trivy/db/trivy.db` path.
  skipUpdate: false
  # The offlineScan option prevents Trivy from sending API requests to identify dependencies.
  #
  # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
  # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
  # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
  # It would work if all the dependencies are in local.
  # This option doesn't affect DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
  offlineScan: false
  # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
  securityCheck: "vuln"
  # The duration to wait for scan completion
  timeout: 5m0s
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 1
      memory: 1Gi
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:
| 677 | |
| 678 | database: |
| 679 | # if external database is used, set "type" to "external" |
| 680 | # and fill the connection information in "external" section |
| 681 | type: internal |
| 682 | internal: |
| 683 | # set the service account to be used, default if left empty |
| 684 | serviceAccountName: "" |
| 685 | # mount the service account token |
| 686 | automountServiceAccountToken: false |
| 687 | image: |
| 688 | repository: goharbor/harbor-db |
| 689 | tag: v2.10.0 |
| 690 | # The initial superuser password for internal database |
| 691 | password: "changeit" |
| 692 | # The size limit for Shared memory, pgSQL use it for shared_buffer |
| 693 | # More details see: |
| 694 | # https://github.com/goharbor/harbor/issues/15034 |
| 695 | shmSizeLimit: 512Mi |
| 696 | # resources: |
| 697 | # requests: |
| 698 | # memory: 256Mi |
| 699 | # cpu: 100m |
| 700 | # The timeout used in livenessProbe; 1 to 5 seconds |
| 701 | livenessProbe: |
| 702 | timeoutSeconds: 1 |
| 703 | # The timeout used in readinessProbe; 1 to 5 seconds |
| 704 | readinessProbe: |
| 705 | timeoutSeconds: 1 |
| 706 | extraEnvVars: [] |
| 707 | nodeSelector: {} |
| 708 | tolerations: [] |
| 709 | affinity: {} |
| 710 | ## The priority class to run the pod as |
| 711 | priorityClassName: |
| 712 | initContainer: |
| 713 | migrator: {} |
| 714 | # resources: |
| 715 | # requests: |
| 716 | # memory: 128Mi |
| 717 | # cpu: 100m |
| 718 | permissions: {} |
| 719 | # resources: |
| 720 | # requests: |
| 721 | # memory: 128Mi |
| 722 | # cpu: 100m |
| 723 | external: |
| 724 | host: "192.168.0.1" |
| 725 | port: "5432" |
| 726 | username: "user" |
| 727 | password: "password" |
| 728 | coreDatabase: "registry" |
| 729 | # if using existing secret, the key must be "password" |
| 730 | existingSecret: "" |
| 731 | # "disable" - No SSL |
| 732 | # "require" - Always SSL (skip verification) |
| 733 | # "verify-ca" - Always SSL (verify that the certificate presented by the |
| 734 | # server was signed by a trusted CA) |
| 735 | # "verify-full" - Always SSL (verify that the certification presented by the |
| 736 | # server was signed by a trusted CA and the server host name matches the one |
| 737 | # in the certificate) |
| 738 | sslmode: "disable" |
| 739 |     # The maximum number of connections in the idle connection pool per pod (core+exporter).
| 740 |     # If it is <= 0, no idle connections are retained.
| 741 | maxIdleConns: 100 |
| 742 | # The maximum number of open connections to the database per pod (core+exporter). |
| 743 | # If it <= 0, then there is no limit on the number of open connections. |
| 744 |     # Note: the default number of connections is 1024 for Harbor's PostgreSQL.
| 745 | maxOpenConns: 900 |
| 746 | ## Additional deployment annotations |
| 747 | podAnnotations: {} |
| 748 | ## Additional deployment labels |
| 749 | podLabels: {} |
| 750 | |
| 751 | redis: |
| 752 | # if external Redis is used, set "type" to "external" |
| 753 | # and fill the connection information in "external" section |
| 754 | type: internal |
| 755 | internal: |
| 756 | # set the service account to be used, default if left empty |
| 757 | serviceAccountName: "" |
| 758 | # mount the service account token |
| 759 | automountServiceAccountToken: false |
| 760 | image: |
| 761 | repository: goharbor/redis-photon |
| 762 | tag: v2.10.0 |
| 763 | # resources: |
| 764 | # requests: |
| 765 | # memory: 256Mi |
| 766 | # cpu: 100m |
| 767 | extraEnvVars: [] |
| 768 | nodeSelector: {} |
| 769 | tolerations: [] |
| 770 | affinity: {} |
| 771 | ## The priority class to run the pod as |
| 772 | priorityClassName: |
| 773 | # # jobserviceDatabaseIndex defaults to "1" |
| 774 | # # registryDatabaseIndex defaults to "2" |
| 775 | # # trivyAdapterIndex defaults to "5" |
| 776 | # # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional |
| 777 | # # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional |
| 778 | jobserviceDatabaseIndex: "1" |
| 779 | registryDatabaseIndex: "2" |
| 780 | trivyAdapterIndex: "5" |
| 781 | # harborDatabaseIndex: "6" |
| 782 | # cacheLayerDatabaseIndex: "7" |
| 783 | external: |
| 784 | # support redis, redis+sentinel |
| 785 | # addr for redis: <host_redis>:<port_redis> |
| 786 | # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3> |
| 787 | addr: "192.168.0.2:6379" |
| 788 | # The name of the set of Redis instances to monitor, it must be set to support redis+sentinel |
| 789 | sentinelMasterSet: "" |
| 790 |     # The "coreDatabaseIndex" must be "0" as the library used by Harbor
| 791 |     # doesn't support configuring it
| 792 | # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional |
| 793 | # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional |
| 794 | coreDatabaseIndex: "0" |
| 795 | jobserviceDatabaseIndex: "1" |
| 796 | registryDatabaseIndex: "2" |
| 797 | trivyAdapterIndex: "5" |
| 798 | # harborDatabaseIndex: "6" |
| 799 | # cacheLayerDatabaseIndex: "7" |
| 800 |     # The username field can be an empty string, in which case authentication is performed against the default user
| 801 | username: "" |
| 802 | password: "" |
| 803 | # If using existingSecret, the key must be REDIS_PASSWORD |
| 804 | existingSecret: "" |
| 805 | ## Additional deployment annotations |
| 806 | podAnnotations: {} |
| 807 | ## Additional deployment labels |
| 808 | podLabels: {} |
| 809 | |
| 810 | exporter: |
| 811 | replicas: 1 |
| 812 | revisionHistoryLimit: 10 |
| 813 | # resources: |
| 814 | # requests: |
| 815 | # memory: 256Mi |
| 816 | # cpu: 100m |
| 817 | extraEnvVars: [] |
| 818 | podAnnotations: {} |
| 819 | ## Additional deployment labels |
| 820 | podLabels: {} |
| 821 | serviceAccountName: "" |
| 822 | # mount the service account token |
| 823 | automountServiceAccountToken: false |
| 824 | image: |
| 825 | repository: goharbor/harbor-exporter |
| 826 | tag: v2.10.0 |
| 827 | nodeSelector: {} |
| 828 | tolerations: [] |
| 829 | affinity: {} |
| 830 | # Spread Pods across failure-domains like regions, availability zones or nodes |
| 831 | topologySpreadConstraints: [] |
| 832 | # - maxSkew: 1 |
| 833 | # topologyKey: topology.kubernetes.io/zone |
| 834 | # nodeTaintsPolicy: Honor |
| 835 | # whenUnsatisfiable: DoNotSchedule |
| 836 | cacheDuration: 23 |
| 837 | cacheCleanInterval: 14400 |
| 838 | ## The priority class to run the pod as |
| 839 | priorityClassName: |
| 840 | |
| 841 | metrics: |
| 842 | enabled: false |
| 843 | core: |
| 844 | path: /metrics |
| 845 | port: 8001 |
| 846 | registry: |
| 847 | path: /metrics |
| 848 | port: 8001 |
| 849 | jobservice: |
| 850 | path: /metrics |
| 851 | port: 8001 |
| 852 | exporter: |
| 853 | path: /metrics |
| 854 | port: 8001 |
| 855 | ## Create prometheus serviceMonitor to scrape harbor metrics. |
| 856 | ## This requires the monitoring.coreos.com/v1 CRD. Please see |
| 857 | ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md |
| 858 | ## |
| 859 | serviceMonitor: |
| 860 | enabled: false |
| 861 | additionalLabels: {} |
| 862 | # Scrape interval. If not set, the Prometheus default scrape interval is used. |
| 863 | interval: "" |
| 864 | # Metric relabel configs to apply to samples before ingestion. |
| 865 | metricRelabelings: |
| 866 | [] |
| 867 | # - action: keep |
| 868 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' |
| 869 | # sourceLabels: [__name__] |
| 870 | # Relabel configs to apply to samples before ingestion. |
| 871 | relabelings: |
| 872 | [] |
| 873 | # - sourceLabels: [__meta_kubernetes_pod_node_name] |
| 874 | # separator: ; |
| 875 | # regex: ^(.*)$ |
| 876 | # targetLabel: nodename |
| 877 | # replacement: $1 |
| 878 | # action: replace |
| 879 | |
| 880 | trace: |
| 881 | enabled: false |
| 882 | # trace provider: jaeger or otel |
| 883 | # jaeger should be 1.26+ |
| 884 | provider: jaeger |
| 885 |   # set sample_rate to 1 if you want to sample 100% of trace data; set it to 0.5 if you want to sample 50% of trace data, and so forth
| 886 | sample_rate: 1 |
| 887 | # namespace used to differentiate different harbor services |
| 888 | # namespace: |
| 889 | # attributes is a key value dict contains user defined attributes used to initialize trace provider |
| 890 | # attributes: |
| 891 | # application: harbor |
| 892 | jaeger: |
| 893 |     # jaeger supports two modes:
| 894 |     # collector mode (uncomment endpoint, and uncomment username and password if needed)
| 895 |     # agent mode (uncomment agent_host and agent_port)
| 896 | endpoint: http://hostname:14268/api/traces |
| 897 | # username: |
| 898 | # password: |
| 899 | # agent_host: hostname |
| 900 | # export trace data by jaeger.thrift in compact mode |
| 901 | # agent_port: 6831 |
| 902 | otel: |
| 903 | endpoint: hostname:4318 |
| 904 | url_path: /v1/traces |
| 905 | compression: false |
| 906 | insecure: true |
| 907 | # timeout is in seconds |
| 908 | timeout: 10 |
| 909 | |
| 910 | # cache layer configurations
| 911 | # if this feature is enabled, harbor will cache the resources
| 912 | # `project/project_metadata/repository/artifact/manifest` in redis,
| 913 | # which helps to improve the performance of highly concurrent manifest pulling.
| 914 | cache: |
| 915 | # default is not enabled. |
| 916 | enabled: false |
| 917 | # default keep cache for one day. |
| 918 | expireHours: 24 |