## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Set this flag to false to prevent users from discovering all available namespaces (they will only see the namespaces they have access to).
## When set to true, Kubeapps creates a ClusterRole so that it can list namespaces.
allowNamespaceDiscovery: true
## Enable IPv6 Configuration for Nginx
enableIPv6: false
## clusters can be configured with a list of clusters that Kubeapps can target for deployments.
## When populated with a single cluster (as it is by default), Kubeapps will not allow users to
## change the target cluster. When populated with multiple clusters, Kubeapps will present the clusters to
## the user as potential targets for install or browsing.
## Note that you can define a single cluster without an apiServiceURL and the chart will assume this is
## the name you are assigning to the cluster on which Kubeapps is itself installed. Specifying more than
## one cluster without an apiServiceURL will cause the chart to display an error.
## The base64-encoded certificateAuthorityData can be obtained from the additional cluster's kube config
## file. For example, to get the CA data for the 0th cluster in your config (adjust the index 0 as necessary):
## kubectl --kubeconfig ~/.kube/kind-config-kubeapps-additional config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}'
#
# clusters:
# - name: default
# - name: second-cluster
# apiServiceURL: https://second-cluster:6443
# # certificateAuthorityData is required for secure communication with the additional API server.
# certificateAuthorityData: LS0tLS1CRUdJ...
# # serviceToken is an optional token used to allow listing namespaces and packagemanifests (operators) on the additional cluster only,
# # so that the UI can present a list of (only) those namespaces to which the user has access, together with the available operators.
# serviceToken: ...
clusters:
- name: default
## The frontend service is the main reverse proxy used to access the Kubeapps UI
## To expose Kubeapps externally either configure the ingress object below or
## set frontend.service.type=LoadBalancer in the frontend configuration.
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
##
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
##
certManager: false
## When the ingress is enabled, a host pointing to this will be created
##
hostname: kubeapps.local
## Enable TLS configuration for the hostname defined at ingress.hostname parameter
## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
##
tls: false
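## For example (illustrative), to serve ingress.hostname over TLS using a secret
## named "kubeapps.local-tls" created via ingress.secrets or cert-manager:
# tls: true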
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations,
## please see https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
##
annotations:
# kubernetes.io/ingress.class: nginx
## Keep the connection open with the API server even if idle (the default is 60 seconds)
## We set it to 10 minutes, which should be enough for our current use case of deploying, upgrading and deleting apps
##
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
## The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: kubeapps.local
## path: /
## The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - kubeapps.local
## secretName: kubeapps.local-tls
## If you're providing your own certificates, please use this to add the certificates as secrets
## certificate should start with -----BEGIN CERTIFICATE-----
## and key with -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is not needed, as cert-manager will create the secret for you if it does not exist
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
##
secrets: []
## - name: kubeapps.local-tls
## key:
## certificate:
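## For example, with inline PEM data (the placeholders below are illustrative
## and must be replaced with your own certificate material):
## - name: kubeapps.local-tls
##   key: |-
##     -----BEGIN RSA PRIVATE KEY-----
##     ...
##     -----END RSA PRIVATE KEY-----
##   certificate: |-
##     -----BEGIN CERTIFICATE-----
##     ...
##     -----END CERTIFICATE-----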
## DEPRECATED: to be removed on 3.0.0
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
##
# hosts:
# - name: kubeapps.local
# path: /
# ## Set this to true in order to enable TLS on the ingress record
# ##
# tls: false
# ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
# ##
# tlsSecret: kubeapps.local-tls
## Frontend parameters
##
frontend:
replicaCount: 2
## Bitnami Nginx image
## ref: https://hub.docker.com/r/bitnami/nginx/tags/
##
image:
registry: docker.io
repository: bitnami/nginx
tag: 1.19.6-debian-10-r18
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Frontend service parameters
##
service:
## Service type
##
type: ClusterIP
## HTTP Port
##
port: 80
## Set a static load balancer IP (only when frontend.service.type="LoadBalancer")
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
# loadBalancerIP:
## Set a specific NodePort (only when frontend.service.type="NodePort")
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport
# nodePort:
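## For example (illustrative values), to expose the frontend through a cloud
## load balancer with a pre-allocated static IP:
# type: LoadBalancer
# loadBalancerIP: 203.0.113.10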
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
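## For example (provider-specific and purely illustrative), to request an
## internal load balancer on GKE:
# annotations:
#   cloud.google.com/load-balancer-type: "Internal"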
## NGINX containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 0
timeoutSeconds: 5
## NGINX containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## Default values set based on usage data from running Kubeapps instances
## ref: https://github.com/kubeapps/kubeapps/issues/478#issuecomment-422979262
##
limits:
cpu: 250m
memory: 128Mi
requests:
cpu: 25m
memory: 32Mi
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: {}
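## For example (illustrative), to tolerate a taint on nodes dedicated to Kubeapps:
# tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "kubeapps"
#     effect: "NoSchedule"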
## Use access_token as the Bearer when talking to the k8s api server
## Some K8s distributions, such as GKE, require it
##
proxypassAccessTokenAsBearer: false
## Set large_client_header_buffers in nginx config
## Can be required when using OIDC or LDAP due to large cookies
#
largeClientHeaderBuffers: "4 32k"
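## If login cookies still don't fit, the buffers can be enlarged further, e.g.
## (illustrative value):
# largeClientHeaderBuffers: "4 64k"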
## AppRepository Controller is the controller used to manage the repositories to
## sync. Set apprepository.initialRepos to configure the initial set of
## repositories to use when first installing Kubeapps.
##
apprepository:
## Running a single controller replica to avoid sync job duplication
##
replicaCount: 1
## Schedule for syncing apprepositories. Every ten minutes by default
# crontab: "*/10 * * * *"
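## For example, to sync once per hour instead (standard cron syntax):
# crontab: "0 * * * *"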
## Bitnami Kubeapps AppRepository Controller image
## ref: https://hub.docker.com/r/bitnami/kubeapps-apprepository-controller/tags/
##
image:
registry: docker.io
repository: bitnami/kubeapps-apprepository-controller
tag: 2.0.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Kubeapps assets synchronization tool
## Image used to perform chart repository syncs
## ref: https://hub.docker.com/r/bitnami/kubeapps-asset-syncer/tags/
##
syncImage:
registry: docker.io
repository: bitnami/kubeapps-asset-syncer
tag: 2.0.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Proxy configuration for the initial chart repositories
##
initialReposProxy:
enabled: false
# http_proxy: "http://yourproxy:3128"
# https_proxy: "http://yourproxy:3128"
# no_proxy: "0.0.0.0/0"
## Initial chart repositories to configure
##
initialRepos:
- name: bitnami
url: https://charts.bitnami.com/bitnami
# Additional repositories
# - name: chartmuseum
# url: https://chartmuseum.default:8080
# nodeSelector:
# somelabel: somevalue
# # Specify an Authorization Header if you are using an authentication method.
# authorizationHeader: "Bearer xrxNC..."
# # If you're providing your own CA certificate, please use this to add it as a secret.
# # It should start with -----BEGIN CERTIFICATE-----
# caCert:
# # Create this apprepository in a custom namespace
# namespace:
## AppRepository Controller containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## Default values set based on usage data from running Kubeapps instances
## ref: https://github.com/kubeapps/kubeapps/issues/478#issuecomment-422979262
##
limits:
cpu: 250m
memory: 128Mi
requests:
cpu: 25m
memory: 32Mi
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: {}
## The controller watches all namespaces by default to support separate AppRepositories per namespace.
## Switch this off only if you need to run multiple instances of Kubeapps in different namespaces
## without each instance watching the AppRepositories of the others.
watchAllNamespaces: true
## Hooks are used to perform actions like populating apprepositories
## or creating required resources during installation or upgrade
##
hooks:
## Bitnami Kubectl image
## ref: https://hub.docker.com/r/bitnami/kubectl/tags/
##
image:
registry: docker.io
repository: bitnami/kubectl
tag: 1.18.14-debian-10-r12
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Affinity for hooks' pods assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for hooks' pods assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for hooks' pods assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: {}
# Kubeops is an interface between the Kubeapps Dashboard and Helm 3/Kubernetes.
kubeops:
replicaCount: 2
image:
registry: docker.io
repository: bitnami/kubeapps-kubeops
tag: 2.0.1-scratch-r2
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
service:
port: 8080
resources:
limits:
cpu: 250m
memory: 256Mi
requests:
cpu: 25m
memory: 32Mi
## Kubeops containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
httpGet:
path: /live
port: 8080
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /ready
port: 8080
initialDelaySeconds: 0
timeoutSeconds: 5
nodeSelector: {}
tolerations: []
affinity: {}
## Assetsvc is used to serve assets metadata over a REST API.
##
assetsvc:
replicaCount: 2
## Bitnami Kubeapps Assetsvc image
## ref: https://hub.docker.com/r/bitnami/kubeapps-assetsvc/tags/
##
image:
registry: docker.io
repository: bitnami/kubeapps-assetsvc
tag: 2.0.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Assetsvc service parameters
##
service:
## HTTP Port
##
port: 8080
## Assetsvc containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## Default values set based on usage data from running Kubeapps instances
## ref: https://github.com/kubeapps/kubeapps/issues/478#issuecomment-422979262
##
limits:
cpu: 250m
memory: 128Mi
requests:
cpu: 25m
memory: 32Mi
## Assetsvc containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
httpGet:
path: /live
port: 8080
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /ready
port: 8080
initialDelaySeconds: 0
timeoutSeconds: 5
## Affinity for Assetsvc pods assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for Assetsvc pods assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for Assetsvc pods assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: {}
## Dashboard serves the compiled static React frontend application. This is an
## internal service used by the main frontend reverse-proxy and should not be
## accessed directly.
##
dashboard:
replicaCount: 2
## Bitnami Kubeapps Dashboard image
## ref: https://hub.docker.com/r/bitnami/kubeapps-dashboard/tags/
##
image:
registry: docker.io
repository: bitnami/kubeapps-dashboard
tag: 2.0.1-debian-10-r50
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Dashboard service parameters
##
service:
## HTTP Port
##
port: 8080
## Dashboard containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 0
timeoutSeconds: 5
## Dashboard containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## Default values set based on usage data from running Kubeapps instances
## ref: https://github.com/kubeapps/kubeapps/issues/478#issuecomment-422979262
##
limits:
cpu: 250m
memory: 128Mi
requests:
cpu: 25m
memory: 32Mi
## Affinity for Dashboard pods assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for Dashboard pods assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for Dashboard pods assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: {}
## PostgreSQL chart configuration
## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/values.yaml
##
postgresql:
## Enable replication for high availability
replication:
enabled: true
## Create a database for Kubeapps on the first run
postgresqlDatabase: assets
## Kubeapps uses PostgreSQL as a cache and persistence is not required
##
persistence:
enabled: false
size: 8Gi
## PostgreSQL credentials are handled by Kubeapps to facilitate upgrades
##
existingSecret: kubeapps-db
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: false
## PostgreSQL containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 250m
## RBAC parameters
##
rbac:
## Perform creation of RBAC resources
##
create: true
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: false
runAsUser: 1001
# fsGroup: 1001
## Image used for the tests. The only requirement is that it includes curl
##
testImage:
registry: docker.io
repository: bitnami/nginx
tag: 1.19.6-debian-10-r18
# Auth Proxy configuration for OIDC support
# ref: https://github.com/kubeapps/kubeapps/blob/master/docs/user/using-an-OIDC-provider.md
authProxy:
## Set to true if Kubeapps should configure the OAuth login/logout URIs defined below.
#
enabled: false
## When authProxy.enabled is true, by default Kubeapps will deploy its own
## auth-proxy service as part of the Kubeapps frontend. Set external to true
## if you are configuring your own auth proxy service external to Kubeapps
## and therefore don't want Kubeapps to deploy its own auth-proxy.
#
external: false
## Overridable flags for OAuth URIs to which the Kubeapps frontend redirects for authn.
## Useful when serving Kubeapps under a sub path or using an external auth proxy.
##
oauthLoginURI: /oauth2/start
oauthLogoutURI: /oauth2/sign_out
## Skip the Kubeapps login page when using OIDC and directly redirect to the IdP
##
skipKubeappsLoginPage: false
## The remaining auth proxy values are relevant only if an internal auth-proxy is
## being configured by Kubeapps.
## Bitnami OAuth2 Proxy image
## ref: https://hub.docker.com/r/bitnami/oauth2-proxy/tags/
##
image:
registry: docker.io
repository: bitnami/oauth2-proxy
tag: 6.1.1-debian-10-r113
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Mandatory parameters for the internal auth-proxy.
##
provider: ""
clientID: ""
clientSecret: ""
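## For example (all values below are illustrative placeholders) for a generic
## OIDC provider, with the issuer URL passed via additionalFlags below:
# provider: "oidc"
# clientID: "kubeapps"
# clientSecret: "my-client-secret"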
## cookieSecret is used by oauth2-proxy to encrypt any credentials so that it requires
## no storage. Note that it must be 16, 24 or 32 bytes. We recommend generating a
## cookieSecret as per the oauth2-proxy configuration documentation
## at https://pusher.github.io/oauth2_proxy/configuration , for example:
## python3 -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(16)).decode())'
cookieSecret: ""
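## Alternatively, assuming openssl is available, a suitable secret can be
## generated with: openssl rand -base64 32 | tr -- '+/' '-_'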
## Use "example.com" to restrict logins to emails from example.com
emailDomain: "*"
## Additional flags for oauth2-proxy
##
additionalFlags: []
# - --ssl-insecure-skip-verify
# - --cookie-secure=false
# - --scope=https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/cloud-platform
# - --oidc-issuer-url=https://accounts.google.com # Only needed if provider is oidc
## OAuth2 Proxy containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## Default values set based on usage data from running Kubeapps instances
## ref: https://github.com/kubeapps/kubeapps/issues/478#issuecomment-422979262
##
limits:
cpu: 250m
memory: 128Mi
requests:
cpu: 25m
memory: 32Mi
# Pinniped Proxy configuration for converting user OIDC tokens to k8s client authorization certs.
# NOTE: This component is alpha functionality in Kubeapps until we complete testing and documentation.
pinnipedProxy:
## Set to true if Kubeapps should configure pinniped-proxy on the frontend.
#
enabled: false
## pinnipedProxy service parameters
##
service:
## HTTP Port
##
port: 3333
image:
registry: docker.io
repository: kubeapps/pinniped-proxy
tag: latest
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Specify the (default) namespace in which pinniped-concierge is installed
## on the cluster(s), as well as the default authenticator type and name for use by
## Kubeapps. These defaults are useful if all clusters have a similar
## pinniped-concierge setup; otherwise they need to be specified per cluster.
defaultPinnipedNamespace: pinniped-concierge
defaultAuthenticatorType: JWTAuthenticator
defaultAuthenticatorName: jwt-authenticator
## Pinniped Proxy containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## Default values set based on usage data from running Kubeapps instances
## ref: https://github.com/kubeapps/kubeapps/issues/478#issuecomment-422979262
##
limits:
cpu: 250m
memory: 128Mi
requests:
cpu: 25m
memory: 32Mi
## Feature flags
## These are used to switch on in-development features or new features which are ready to be released.
featureFlags:
invalidateCache: true