# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0
## @section Global parameters
## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Currently available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global storage class for dynamic provisioning
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
## Compatibility adaptations for Kubernetes platforms
##
compatibility:
## Compatibility adaptations for Openshift
##
openshift:
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
##
adaptSecurityContext: auto
## @section Common parameters
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override influxdb.fullname template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override influxdb.fullname template with a string
##
fullnameOverride: ""
## @param clusterDomain Default Kubernetes cluster domain
##
clusterDomain: cluster.local
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## Enable diagnostic mode in the deployment
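## Usage sketch (illustrative commands; the release name, chart reference placeholders and pod name are assumptions):
##   helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/influxdb --set diagnosticMode.enabled=true
##   kubectl exec -ti <influxdb-pod-name> -- /bin/bash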
##
diagnosticMode:
## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
##
enabled: false
## @param diagnosticMode.command Command to override all containers in the deployment
##
command:
- sleep
## @param diagnosticMode.args Args to override all containers in the deployment
##
args:
- infinity
## @section InfluxDB&trade; parameters
## Bitnami InfluxDB&trade; image
## ref: https://hub.docker.com/r/bitnami/influxdb/tags/
## @param image.registry [default: REGISTRY_NAME] InfluxDB&trade; image registry
## @param image.repository [default: REPOSITORY_NAME/influxdb] InfluxDB&trade; image repository
## @skip image.tag InfluxDB&trade; image tag (immutable tags are recommended)
## @param image.digest InfluxDB&trade; image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy InfluxDB&trade; image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
## @param image.debug Specify if debug logs should be enabled
##
image:
registry: docker.io
repository: bitnami/influxdb
tag: 2.7.6-debian-12-r12
digest: ""
## Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Set to true if you would like to see extra information on logs
##
debug: false
## Authentication parameters
##
auth:
## @param auth.enabled Enable/disable authentication (Variable to keep compatibility with InfluxDB&trade; v1, in v2 it will be ignored)
##
enabled: true
## @param auth.usePasswordFiles Whether to use files to provide secrets instead of env vars.
##
usePasswordFiles: false
## InfluxDB&trade; admin credentials
##
admin:
## @param auth.admin.username InfluxDB&trade; admin user name
##
username: admin
## @param auth.admin.password InfluxDB&trade; admin user's password
##
password: ""
## @param auth.admin.token InfluxDB&trade; admin user's token. Only valid with InfluxDB&trade; v2
##
token: ""
## @param auth.admin.org InfluxDB&trade; admin user's org. Only valid with InfluxDB&trade; v2
##
org: primary
## @param auth.admin.bucket InfluxDB&trade; admin user's bucket. Only valid with InfluxDB&trade; v2
##
bucket: primary
## @param auth.admin.retention InfluxDB&trade; admin user's bucket retention. Only valid with InfluxDB&trade; v2
##
retention: ""
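## e.g. (a sketch of setting the admin credentials at install time; all values and the chart reference placeholders are illustrative):
## helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/influxdb \
##   --set auth.admin.username=admin \
##   --set auth.admin.password=<secure-password> \
##   --set auth.admin.token=<admin-token> \
##   --set auth.admin.org=primary --set auth.admin.bucket=primary
##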
## @param auth.createUserToken Whether to create tokens for the different users. Take into account these tokens are going to be created randomly by the CLI and they will not be accessible from a secret. See more influxdb 2.0 [auth ref](https://docs.influxdata.com/influxdb/v2.0/security/tokens/)
## Note that these tokens are created by the CLI at runtime, so it is not possible
## to retrieve them from k8s secrets nor to provide them through values
## See more influxdb 2.0 auth ref: https://docs.influxdata.com/influxdb/v2.0/security/tokens/
##
createUserToken: false
## InfluxDB&trade; credentials for user with 'admin' privileges on the db specified at 'database' parameter
##
user:
## @param auth.user.username Name for InfluxDB&trade; user with 'admin' privileges on the bucket specified at `auth.user.bucket` and `auth.user.org` or `auth.admin.org`
##
username: ""
## @param auth.user.password InfluxDB&trade; password for `auth.user.username` user
##
password: ""
## @param auth.user.org Org to be created on first run
##
org: ""
## @param auth.user.bucket Bucket to be created on first run
## If it is not empty, a new bucket will be created in case it does not already exist
##
bucket: ""
## InfluxDB&trade; credentials for user with 'read' privileges on the db specified at 'database' parameter
## @param auth.readUser.username Name for InfluxDB&trade; user with 'read' privileges on the bucket specified at `auth.user.bucket`
## @param auth.readUser.password InfluxDB&trade; password for `auth.readUser.username` user
##
readUser:
username: ""
password: ""
## InfluxDB&trade; credentials for user with 'write' privileges on the db specified at 'database' parameter
## @param auth.writeUser.username Name for InfluxDB&trade; user with 'write' privileges on the bucket specified at `auth.user.bucket`
## @param auth.writeUser.password InfluxDB&trade; password for `auth.writeUser.username` user
##
writeUser:
username: ""
password: ""
## @param auth.existingSecret Name of existing Secret object with InfluxDB&trade; credentials (`auth.admin.password`, `auth.user.password`, `auth.readUser.password`, and `auth.writeUser.password` will be ignored and picked up from this secret)
##
existingSecret: ""
## InfluxDB&trade; backend parameters
##
influxdb:
## @param influxdb.configuration Specify content for influxdb.conf
## Alternatively, you can put your config.yaml under the files/conf/ directory
##
## configuration: |-
## reporting-disabled: true
## http-bind-address: "127.0.0.1:8086"
## ...
##
configuration: ""
## @param influxdb.existingConfiguration Name of existing ConfigMap object with the InfluxDB&trade; configuration (`influxdb.configuration` will be ignored).
##
existingConfiguration: ""
## @param influxdb.initdbScripts Dictionary of initdb scripts
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
## initdbScripts:
## my_init_script.sh: |
## #!/bin/sh
## echo "Do something."
initdbScripts: {}
## @param influxdb.initdbScriptsCM Name of existing ConfigMap object with the initdb scripts (`influxdb.initdbScripts` will be ignored).
##
initdbScriptsCM: ""
## @param influxdb.initdbScriptsSecret Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsCM` or `initdbScripts`)
##
initdbScriptsSecret: ""
## @param influxdb.podAffinityPreset InfluxDB&trade; Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param influxdb.podAntiAffinityPreset InfluxDB&trade; Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param influxdb.nodeAffinityPreset.type InfluxDB&trade; Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
type: ""
## @param influxdb.nodeAffinityPreset.key InfluxDB&trade; Node label key to match. Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param influxdb.nodeAffinityPreset.values InfluxDB&trade; Node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param influxdb.affinity InfluxDB&trade; Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param influxdb.nodeSelector InfluxDB&trade; Node labels for pod assignment
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param influxdb.tolerations InfluxDB&trade; Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param influxdb.podAnnotations Annotations for InfluxDB&trade; pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param influxdb.podLabels Extra labels for InfluxDB&trade; pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param influxdb.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param influxdb.hostAliases InfluxDB&trade; pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
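## e.g. (illustrative entry; the IP and hostnames are assumptions, not chart defaults):
## hostAliases:
##   - ip: "10.0.0.10"
##     hostnames:
##       - "influxdb.internal"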
##
hostAliases: []
## @param influxdb.updateStrategy.type InfluxDB&trade; statefulset/deployment strategy type
## Statefulset ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
## Deployment ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
##
updateStrategy:
## StrategyType
## Can be set to RollingUpdate or OnDelete
##
type: RollingUpdate
## @param influxdb.priorityClassName InfluxDB&trade; pods' priorityClassName
##
priorityClassName: ""
## @param influxdb.schedulerName Name of the k8s scheduler (other than default)
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param influxdb.topologySpreadConstraints Topology Spread Constraints for pod assignment
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## The value is evaluated as a template
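## e.g. (a minimal sketch spreading pods across nodes; the label selector is illustrative):
## topologySpreadConstraints:
##   - maxSkew: 1
##     topologyKey: kubernetes.io/hostname
##     whenUnsatisfiable: ScheduleAnyway
##     labelSelector:
##       matchLabels:
##         app.kubernetes.io/name: influxdb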
##
topologySpreadConstraints: []
## @param influxdb.podManagementPolicy podManagementPolicy to manage scaling operation of InfluxDB&trade; pods
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
##
podManagementPolicy: OrderedReady
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param influxdb.podSecurityContext.enabled Enabled InfluxDB&trade; pods' Security Context
## @param influxdb.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param influxdb.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param influxdb.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param influxdb.podSecurityContext.fsGroup Set InfluxDB&trade; pod's Security Context fsGroup
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param influxdb.containerSecurityContext.enabled Enabled containers' Security Context
## @param influxdb.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param influxdb.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param influxdb.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param influxdb.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param influxdb.containerSecurityContext.privileged Set container's Security Context privileged
## @param influxdb.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param influxdb.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param influxdb.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param influxdb.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## InfluxDB&trade; pods' resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param influxdb.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param influxdb.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## @param influxdb.command Override default container command (useful when using custom images)
##
command: []
## @param influxdb.args Override default container args (useful when using custom images)
##
args: []
## @param influxdb.lifecycleHooks for the InfluxDB&trade; container(s) to automate configuration before or after startup
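## e.g. (an illustrative postStart hook; the command is an assumption, not something the chart requires):
## lifecycleHooks:
##   postStart:
##     exec:
##       command: ["/bin/sh", "-c", "echo 'InfluxDB container started'"]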
##
lifecycleHooks: {}
## @param influxdb.extraEnvVars Array containing extra env vars to configure InfluxDB&trade;
## For example:
## extraEnvVars:
## - name: INFLUXDB_DATA_QUERY_LOG_ENABLED
## value: "true"
##
extraEnvVars: []
## @param influxdb.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for InfluxDB&trade; nodes
##
extraEnvVarsCM: ""
## @param influxdb.extraEnvVarsSecret Name of existing Secret containing extra env vars for InfluxDB&trade; nodes
##
extraEnvVarsSecret: ""
## @param influxdb.extraVolumes Array of extra volumes to be added to the deployment (evaluated as template). Requires setting extraVolumeMounts
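## e.g. (a sketch mounting an extra ConfigMap; the volume and ConfigMap names are illustrative):
## extraVolumes:
##   - name: extra-config
##     configMap:
##       name: my-extra-config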
##
extraVolumes: []
## @param influxdb.extraVolumeMounts Array of extra volume mounts to be added to the container (evaluated as template). Normally used with extraVolumes.
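## e.g. (counterpart mount for the illustrative volume above):
## extraVolumeMounts:
##   - name: extra-config
##     mountPath: /opt/extra-config
##     readOnly: true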
##
extraVolumeMounts: []
## @param influxdb.containerPorts.http InfluxDB&trade; container HTTP port
## @param influxdb.containerPorts.rpc InfluxDB&trade; container RPC port
##
containerPorts:
http: 8086
rpc: 8088
## Configure extra options for InfluxDB&trade; containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param influxdb.startupProbe.enabled Enable startupProbe
## @param influxdb.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param influxdb.startupProbe.periodSeconds Period seconds for startupProbe
## @param influxdb.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param influxdb.startupProbe.failureThreshold Failure threshold for startupProbe
## @param influxdb.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 180
periodSeconds: 45
timeoutSeconds: 30
successThreshold: 1
failureThreshold: 6
## @param influxdb.livenessProbe.enabled Enable livenessProbe
## @param influxdb.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param influxdb.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param influxdb.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param influxdb.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param influxdb.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 180
periodSeconds: 45
timeoutSeconds: 30
successThreshold: 1
failureThreshold: 6
## @param influxdb.readinessProbe.enabled Enable readinessProbe
## @param influxdb.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param influxdb.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param influxdb.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param influxdb.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param influxdb.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 45
timeoutSeconds: 30
successThreshold: 1
failureThreshold: 6
## @param influxdb.customStartupProbe Override default startup probe
##
customStartupProbe: {}
## @param influxdb.customLivenessProbe Override default liveness probe
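## e.g. (a sketch using the InfluxDB HTTP health endpoint; adjust path, port and timings to your setup):
## customLivenessProbe:
##   httpGet:
##     path: /health
##     port: http
##   initialDelaySeconds: 60
##   periodSeconds: 30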
##
customLivenessProbe: {}
## @param influxdb.customReadinessProbe Override default readiness probe
##
customReadinessProbe: {}
## @param influxdb.sidecars Add additional sidecar containers to the InfluxDB&trade; pod(s)
## e.g:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param influxdb.initContainers Add additional init containers to the InfluxDB&trade; pod(s)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
## e.g:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## command: ['sh', '-c', 'echo "hello world"']
##
initContainers: []
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param influxdb.pdb.create Enable/disable a Pod Disruption Budget creation
## @param influxdb.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param influxdb.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `influxdb.pdb.minAvailable` and `influxdb.pdb.maxUnavailable` are empty.
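## e.g. (keep at least one pod available during voluntary disruptions; values are illustrative):
## pdb:
##   create: true
##   minAvailable: 1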
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## Service parameters
##
service:
## @param influxdb.service.type Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`)
##
type: ClusterIP
## @param influxdb.service.ports.http InfluxDB&trade; HTTP port
## @param influxdb.service.ports.rpc InfluxDB&trade; RPC port
##
ports:
http: 8086
rpc: 8088
## @param influxdb.service.nodePorts [object] Specify the nodePort(s) value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePorts:
http: ""
rpc: ""
## @param influxdb.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param influxdb.service.loadBalancerSourceRanges Addresses that are allowed when the service is LoadBalancer
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
## loadBalancerSourceRanges:
## - 10.10.10.0/24
loadBalancerSourceRanges: []
## @param influxdb.service.clusterIP Static clusterIP or None for headless services
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param influxdb.service.externalTrafficPolicy InfluxDB&trade; service external traffic policy
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param influxdb.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
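## e.g. (an illustrative extra port, typically paired with a sidecar container):
## extraPorts:
##   - name: sidecar-metrics
##     port: 9100
##     targetPort: 9100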
##
extraPorts: []
## @param influxdb.service.annotations Annotations for InfluxDB&trade; service
##
annotations: {}
## @param influxdb.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param influxdb.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
sessionAffinityConfig: {}
## @section InfluxDB Collectd&trade; parameters
collectd:
## @param collectd.enabled InfluxDB Collectd&trade; service enable
##
enabled: false
service:
## @param collectd.service.type Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`)
##
type: ClusterIP
## @param collectd.service.port InfluxDB Collectd&trade; UDP port (should match with corresponding port in influxdb.conf)
## This requires corresponding configuration in influxdb.conf to enable
## collectd block
##
port: 25826
## @param collectd.service.nodePort Kubernetes HTTP node port
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## @param collectd.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param collectd.service.loadBalancerSourceRanges Addresses that are allowed when the service is LoadBalancer
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
## loadBalancerSourceRanges:
## - 10.10.10.0/24
loadBalancerSourceRanges: []
## @param collectd.service.clusterIP Static clusterIP or None for headless services
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param collectd.service.externalTrafficPolicy InfluxDB Collectd&trade; service external traffic policy
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param collectd.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
##
extraPorts: []
## @param collectd.service.annotations Annotations for InfluxDB Collectd&trade; service
## e.g:
## metallb.universe.tf/allow-shared-ip: "true"
##
annotations: {}
## @param collectd.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same mongos Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param collectd.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## @section Exposing parameters
## Configure the ingress resource that allows you to access the
## influxdb installation. Set up the URL
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
## @param ingress.enabled Enable ingress controller resource
##
enabled: false
## @param ingress.tls Create TLS Secret
## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" (tpl .Values.ingress.hostname .) }}
## You can use the ingress.secrets parameter to create this TLS secret, or rely on cert-manager to create it
##
tls: false
## DEPRECATED: Use ingress.annotations instead of ingress.certManager
## certManager: false
##
## @param ingress.pathType Ingress path type
##
pathType: ImplementationSpecific
## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
##
apiVersion: ""
## @param ingress.hostname Default host for the ingress resource (evaluated as template)
##
hostname: influxdb.local
## @param ingress.path Ingress path
## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
##
path: /
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: influxdb.local
## path: /
##
extraHosts: []
## @param ingress.extraPaths Additional arbitrary path/backend objects
## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - influxdb.local
## secretName: influxdb.local-tls
##
extraTls: []
## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate are expected in PEM format: the certificate should start with -----BEGIN CERTIFICATE-----
## and the key with -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## - name: influxdb.local-tls
## key:
## certificate:
##
secrets: []
## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
##
ingressClassName: ""
## @param ingress.extraRules Additional rules to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
## e.g:
## extraRules:
## - host: example.local
## http:
## path: /
## backend:
## service:
## name: example-svc
## port:
## name: http
##
extraRules: []
## @section Metrics parameters
## Prometheus metrics
## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint
##
metrics:
## @param metrics.enabled Enable the export of Prometheus metrics
##
enabled: false
service:
## @param metrics.service.type Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`)
##
type: ClusterIP
## @param metrics.service.port InfluxDB&trade; Prometheus port
##
port: 9122
## @param metrics.service.nodePort Kubernetes HTTP node port
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## @param metrics.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param metrics.service.loadBalancerSourceRanges Addresses that are allowed when the service is LoadBalancer
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
## loadBalancerSourceRanges:
## - 10.10.10.0/24
loadBalancerSourceRanges: []
## @param metrics.service.clusterIP Static clusterIP or None for headless services
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param metrics.service.annotations [object] Annotations for the Prometheus metrics service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.service.port }}"
prometheus.io/path: "/metrics"
## @param metrics.service.externalTrafficPolicy Service external traffic policy
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
##
extraPorts: []
## @param metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same mongos Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
## e.g:
## namespace: monitoring
##
namespace: ""
## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## interval: 10s
##
interval: ""
## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## scrapeTimeout: 10s
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
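## e.g. (a standard Prometheus Operator relabeling sketch; the target label name is illustrative):
## relabelings:
##   - sourceLabels: [__meta_kubernetes_pod_node_name]
##     targetLabel: node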
##
relabelings: []
## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
##
metricRelabelings: []
## @param metrics.serviceMonitor.selector Prometheus instance selector labels
## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
## e.g:
## selector:
## prometheus: my-prometheus
##
selector: {}
## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
##
honorLabels: false
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports server is listening
## on. When true, server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
##
allowExternalEgress: true
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
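## e.g. (allow traffic from pods in a "monitoring" namespace; the namespace and pod labels are illustrative):
## ingressNSMatchLabels:
##   kubernetes.io/metadata.name: monitoring
## ingressNSPodMatchLabels:
##   app.kubernetes.io/name: telegraf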
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## Persistence parameters
##
persistence:
## @param persistence.enabled Enable data persistence
##
enabled: true
## @param persistence.existingClaim Use an existing PVC which must be created manually before it is bound
## If defined, the PVC must be created manually before the volume is bound
## The value is evaluated as a template
##
existingClaim: ""
## @param persistence.storageClass Specify the `storageClass` used to provision the volume
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner.
##
storageClass: ""
## @param persistence.accessModes Access mode of data volume
##
accessModes:
- ReadWriteOnce
## @param persistence.size Size of data volume
##
size: 8Gi
## @param persistence.annotations Persistent Volume Claim annotations
##
annotations: {}
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
## @param serviceAccount.create Specifies whether a ServiceAccount should be created
## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template.
## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
##
serviceAccount:
## DEPRECATED: serviceAccount.enabled - Use serviceAccount.create instead
##
#enabled: false
create: true
name: ""
automountServiceAccountToken: false
annotations: {}
## Pod Security Policy
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
##
psp:
create: false
## Role Based Access
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
## @param rbac.create Create Role and RoleBinding (required for PSP to work)
##
rbac:
create: false
## @section Volume permissions parameters
## Init Container parameters
## Change the owner and group of the persistent volume mountpoint to 'runAsUser:fsGroup'
## values from the securityContext section.
##
volumePermissions:
## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume mountpoint to `runAsUser:fsGroup`
##
enabled: false
## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name
## @skip volumePermissions.image.tag Init container volume-permissions image tag
## @param volumePermissions.image.digest Init container volume-permissions image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r22
digest: ""
## Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Init container Security Context
## Note: the chown of the data folder is done to securityContext.runAsUser
## and not the below volumePermissions.securityContext.runAsUser
## When runAsUser is set to the special value "auto", the init container will try to chown the
## data folder to the auto-determined user and group, using the commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift, which uses SCCs with dynamic user IDs (and 0 is not allowed).
## You may want to use volumePermissions.securityContext.runAsUser="auto" in combination with
## influxdb.podSecurityContext.enabled=false
## @param volumePermissions.securityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param volumePermissions.securityContext.runAsUser User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto")
##
securityContext:
seLinuxOptions: {}
runAsUser: 0
## @section InfluxDB&trade; backup parameters
backup:
## @param backup.enabled Enable InfluxDB&trade; backup
##
enabled: false
## @param backup.directory Directory where backups are stored
##
directory: "/backups"
## @param backup.retentionDays Retention time in days for backups (older backups are deleted)
##
retentionDays: 10
## Persistence parameters
##
persistence:
## @param backup.persistence.ownConfig Prefer independent own persistence parameters to configure the backup volume
## When set to `false` (for backwards compatibility), the rest of the persistence parameters below will be ignored.
## This parameter will be set to `true` and removed in a future release.
##
ownConfig: false
## @param backup.persistence.enabled Enable data persistence for backup volume
##
enabled: true
## @param backup.persistence.existingClaim Use an existing PVC which must be created manually before it is bound
## If defined, the PVC must be created manually before the volume is bound
## The value is evaluated as a template
##
existingClaim: ""
## @param backup.persistence.storageClass Specify the `storageClass` used to provision the volume
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner.
##
storageClass: ""
## @param backup.persistence.accessModes Access mode of data volume
##
accessModes:
- ReadWriteOnce
## @param backup.persistence.size Size of data volume
##
size: 8Gi
## @param backup.persistence.annotations Persistent Volume Claim annotations
##
annotations: {}
## Cronjob configuration
## This cronjob is used to create InfluxDB&trade; backups
##
cronjob:
## @param backup.cronjob.schedule Schedule in Cron format to save snapshots
## See https://en.wikipedia.org/wiki/Cron
##
schedule: "0 2 * * *"
## @param backup.cronjob.historyLimit Number of successful finished jobs to retain
##
historyLimit: 1
## @param backup.cronjob.podAnnotations Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## K8s Security Context for Backup Cronjob pods
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
## @param backup.cronjob.podSecurityContext.enabled Enable security context for InfluxDB&trade; backup pods
## @param backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the InfluxDB&trade; filesystem
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## K8s Security Context for Backup Cronjob containers
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context
## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged
## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## @param backup.podAffinityPreset Backup&trade; Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param backup.podAntiAffinityPreset Backup&trade; Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param backup.nodeAffinityPreset.type Backup&trade; Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
type: ""
## @param backup.nodeAffinityPreset.key Backup&trade; Node label key to match. Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param backup.nodeAffinityPreset.values Backup&trade; Node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param backup.affinity Backup&trade; Affinity for backup pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param backup.nodeSelector Backup&trade; Node labels for backup pod assignment
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param backup.tolerations Backup&trade; Tolerations for backup pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Storage providers to which backups are uploaded
##
uploadProviders:
## Google Storage Bucket configuration
## @param backup.uploadProviders.google.enabled Enable upload to Google storage bucket
## @param backup.uploadProviders.google.secret JSON secret with service account data to access the Google storage bucket
## @param backup.uploadProviders.google.secretKey Service account secret key name
## @param backup.uploadProviders.google.existingSecret Name of existing secret object with Google service account JSON credentials
## @param backup.uploadProviders.google.bucketName Google storage bucket name
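## e.g. (a sketch of providing the service account JSON via an existing secret; the secret name is illustrative):
##   kubectl create secret generic influxdb-backup-gcs --from-file=key.json=./service-account.json
## then set existingSecret to "influxdb-backup-gcs" and keep secretKey as "key.json"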
##
google:
enabled: false
secret: ""
secretKey: "key.json"
existingSecret: ""
bucketName: "gs://bucket/influxdb"
## Bitnami Google Cloud SDK image
## ref: https://hub.docker.com/r/bitnami/google-cloud-sdk/tags/
## @param backup.uploadProviders.google.image.registry [default: REGISTRY_NAME] Google Cloud SDK image registry
## @param backup.uploadProviders.google.image.repository [default: REPOSITORY_NAME/google-cloud-sdk] Google Cloud SDK image name
## @skip backup.uploadProviders.google.image.tag Google Cloud SDK image tag
## @param backup.uploadProviders.google.image.digest Google Cloud SDK image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param backup.uploadProviders.google.image.pullPolicy Google Cloud SDK image pull policy
## @param backup.uploadProviders.google.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/google-cloud-sdk
tag: 0.478.0-debian-12-r0
digest: ""
## Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param backup.uploadProviders.google.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.uploadProviders.google.resources is set (backup.uploadProviders.google.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
## @param backup.uploadProviders.google.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Azure Storage Container configuration
##
azure:
## @param backup.uploadProviders.azure.enabled Enable upload to Azure storage container
## @param backup.uploadProviders.azure.secret Secret with credentials to access Azure storage
## @param backup.uploadProviders.azure.secretKey Service account secret key name
## @param backup.uploadProviders.azure.existingSecret Name of existing secret object
## @param backup.uploadProviders.azure.containerName Destination container
enabled: false
secret: ""
secretKey: "connection-string"
existingSecret: ""
containerName: "influxdb-container"
## Bitnami Azure CLI image
## ref: https://hub.docker.com/r/bitnami/azure-cli/tags/
## @param backup.uploadProviders.azure.image.registry [default: REGISTRY_NAME] Azure CLI image registry
## @param backup.uploadProviders.azure.image.repository [default: REPOSITORY_NAME/azure-cli] Azure CLI image repository
## @skip backup.uploadProviders.azure.image.tag Azure CLI image tag (immutable tags are recommended)
## @param backup.uploadProviders.azure.image.digest Azure CLI image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param backup.uploadProviders.azure.image.pullPolicy Azure CLI image pull policy
## @param backup.uploadProviders.azure.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/azure-cli
tag: 2.61.0-debian-12-r0
digest: ""
## Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param backup.uploadProviders.azure.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.uploadProviders.azure.resources is set (backup.uploadProviders.azure.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
## @param backup.uploadProviders.azure.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
aws:
## @param backup.uploadProviders.aws.enabled Enable upload to AWS S3 bucket
## @param backup.uploadProviders.aws.accessKeyID Access Key ID to access AWS S3
## @param backup.uploadProviders.aws.secretAccessKey Secret Access Key to access AWS S3
## @param backup.uploadProviders.aws.region Region of the AWS S3 bucket
## @param backup.uploadProviders.aws.existingSecret Name of existing secret object
## @param backup.uploadProviders.aws.bucketName AWS S3 bucket name
## @param backup.uploadProviders.aws.endpoint AWS S3 endpoint; if empty, the default public AWS S3 endpoint is used
enabled: false
accessKeyID: ""
secretAccessKey: ""
region: "us-east-1"
existingSecret: ""
bucketName: "s3://bucket/influxdb"
endpoint: ""
## Bitnami AWS CLI image
## ref: https://hub.docker.com/r/bitnami/aws-cli/tags
## @param backup.uploadProviders.aws.image.registry [default: REGISTRY_NAME] AWS CLI image registry
## @param backup.uploadProviders.aws.image.repository [default: REPOSITORY_NAME/aws-cli] AWS CLI image repository
## @skip backup.uploadProviders.aws.image.tag AWS CLI image tag (immutable tags are recommended)
## @param backup.uploadProviders.aws.image.digest AWS CLI image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param backup.uploadProviders.aws.image.pullPolicy AWS CLI image pull policy
## @param backup.uploadProviders.aws.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/aws-cli
tag: 2.15.60-debian-12-r0
digest: ""
## Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param backup.uploadProviders.aws.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.uploadProviders.aws.resources is set (backup.uploadProviders.aws.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
## @param backup.uploadProviders.aws.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}