Files
charts/bitnami/kube-prometheus/values.yaml
Bitnami Bot 40c8a36c00 [bitnami/kube-prometheus] ⬆️ Update dependency references (#34624)
* [bitnami/kube-prometheus] Release 11.2.5 updating components versions

Signed-off-by: Bitnami Bot <bitnami.bot@broadcom.com>

* Update CHANGELOG.md

Signed-off-by: Bitnami Bot <bitnami.bot@broadcom.com>

---------

Signed-off-by: Bitnami Bot <bitnami.bot@broadcom.com>
2025-06-25 15:44:48 +02:00

3870 lines
180 KiB
YAML
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  defaultStorageClass: ""
  ## Security parameters
  ##
  security:
    ## @param global.security.allowInsecureImages Allows skipping image verification
    allowInsecureImages: false
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: auto
## @section Common parameters
##
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override `kube-prometheus.name` template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override `kube-prometheus.fullname` template with a string
##
fullnameOverride: ""
## @param namespaceOverride String to fully override common.names.namespace
##
namespaceOverride: ""
## @param commonAnnotations Annotations to add to all deployed objects
## E.g:
## commonAnnotations:
##   myAnnotation: myValue
##
commonAnnotations: {}
## @param commonLabels Labels to add to all deployed objects
## E.g:
## commonLabels:
##   myLabel: myValue
##
commonLabels: {}
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @param clusterDomain Kubernetes cluster domain name
##
clusterDomain: cluster.local
## @section Prometheus Operator Parameters
##
operator:
  ## @param operator.enabled Deploy Prometheus Operator to the cluster
  ##
  enabled: true
  ## Bitnami Prometheus Operator image version
  ## ref: https://hub.docker.com/r/bitnami/prometheus-operator/tags/
  ## @param operator.image.registry [default: REGISTRY_NAME] Prometheus Operator image registry
  ## @param operator.image.repository [default: REPOSITORY_NAME/prometheus-operator] Prometheus Operator image repository
  ## @skip operator.image.tag Prometheus Operator image tag (immutable tags are recommended)
  ## @param operator.image.digest Prometheus Operator image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
  ## @param operator.image.pullPolicy Prometheus Operator image pull policy
  ## @param operator.image.pullSecrets Specify docker-registry secret names as an array
  ##
  image:
    registry: docker.io
    repository: bitnami/prometheus-operator
    tag: 0.83.0-debian-12-r4
    digest: ""
    ## Specify an imagePullPolicy
    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
    ##
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## Example:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []
  ## @param operator.extraArgs Additional arguments passed to Prometheus Operator
  ## Example:
  ## extraArgs:
  ##   - --namespaces={{ include "common.names.namespace" . }}
  ##
  extraArgs: []
  ## @param operator.command Override default container command (useful when using custom images)
  ##
  command: []
  ## @param operator.args Override default container args (useful when using custom images)
  ##
  args: []
  ## @param operator.lifecycleHooks for the Prometheus Operator container(s) to automate configuration before or after startup
  ##
  lifecycleHooks: {}
  ## @param operator.extraEnvVars Array with extra environment variables to add to Prometheus Operator nodes
  ## e.g:
  ## extraEnvVars:
  ##   - name: FOO
  ##     value: "bar"
  ##
  extraEnvVars: []
  ## @param operator.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Prometheus Operator nodes
  ##
  extraEnvVarsCM: ""
  ## @param operator.extraEnvVarsSecret Name of existing Secret containing extra env vars for Prometheus Operator nodes
  ##
  extraEnvVarsSecret: ""
  ## @param operator.extraVolumes Optionally specify extra list of additional volumes for the Prometheus Operator pod(s)
  ##
  extraVolumes: []
  ## @param operator.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Prometheus Operator container(s)
  ##
  extraVolumeMounts: []
  ## @param operator.sidecars Add additional sidecar containers to the Prometheus Operator pod(s)
  ## e.g:
  ## sidecars:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  sidecars: []
  ## @param operator.initContainers Add additional init containers to the Prometheus Operator pod(s)
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
  ## e.g:
  ## initContainers:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     command: ['sh', '-c', 'echo "hello world"']
  ##
  initContainers: []
  ## @param operator.automountServiceAccountToken Mount Service Account token in pod
  ##
  automountServiceAccountToken: true
  ## @param operator.hostAliases Add deployment host aliases
  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
  ##
  hostAliases: []
  ## Service account for Prometheus Operator to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    ## @param operator.serviceAccount.create Specify whether to create a ServiceAccount for Prometheus Operator
    ##
    create: true
    ## @param operator.serviceAccount.name The name of the ServiceAccount to create
    ## If not set and create is true, a name is generated using the kube-prometheus.operator.fullname template
    ##
    name: ""
    ## @param operator.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
    ##
    automountServiceAccountToken: false
    ## @param operator.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
    ##
    annotations: {}
  ## @param operator.schedulerName Name of the Kubernetes scheduler (other than default)
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param operator.terminationGracePeriodSeconds In seconds, time given to the Prometheus Operator pod to terminate gracefully
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
  ##
  terminationGracePeriodSeconds: ""
  ## @param operator.topologySpreadConstraints Topology Spread Constraints for pod assignment
  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ## The value is evaluated as a template
  ##
  topologySpreadConstraints: []
  ## Prometheus Operator pods' Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
  ## @param operator.podSecurityContext.enabled Enable pod security context
  ## @param operator.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
  ## @param operator.podSecurityContext.sysctls Set kernel settings using the sysctl interface
  ## @param operator.podSecurityContext.supplementalGroups Set filesystem extra groups
  ## @param operator.podSecurityContext.fsGroup Group ID for the container filesystem
  ##
  podSecurityContext:
    enabled: true
    fsGroupChangePolicy: Always
    sysctls: []
    supplementalGroups: []
    fsGroup: 1001
  ## Prometheus Operator containers' Security Context (only main container)
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param operator.containerSecurityContext.enabled Enabled containers' Security Context
  ## @param operator.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
  ## @param operator.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
  ## @param operator.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
  ## @param operator.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
  ## @param operator.containerSecurityContext.privileged Set container's Security Context privileged
  ## @param operator.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
  ## @param operator.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
  ## @param operator.containerSecurityContext.capabilities.drop List of capabilities to be dropped
  ## @param operator.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
  ##
  containerSecurityContext:
    enabled: true
    seLinuxOptions: {}
    runAsUser: 1001
    runAsGroup: 1001
    runAsNonRoot: true
    privileged: false
    readOnlyRootFilesystem: true
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: "RuntimeDefault"
  ## Prometheus Operator Service
  ##
  service:
    ## @param operator.service.type Kubernetes service type
    ##
    type: ClusterIP
    ## @param operator.service.ports.http Prometheus Operator service port
    ##
    ports:
      http: 8080
    ## @param operator.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` for headless service
    ## e.g:
    ## clusterIP: None
    ##
    clusterIP: ""
    ## @param operator.service.nodePorts.http Kubernetes Service nodePort
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    ## e.g:
    ## nodePort: 30080
    ##
    nodePorts:
      http: ""
    ## @param operator.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    loadBalancerIP: ""
    ## @param operator.service.loadBalancerClass Operator service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
    ##
    loadBalancerClass: ""
    ## @param operator.service.loadBalancerSourceRanges Addresses that are allowed when svc is `LoadBalancer`
    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
    ## e.g:
    ## loadBalancerSourceRanges:
    ##   - 10.10.10.0/24
    ##
    loadBalancerSourceRanges: []
    ## @param operator.service.externalTrafficPolicy Enable client source IP preservation
    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
    ## There are two available options: Cluster (default) and Local.
    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
    ##
    externalTrafficPolicy: Cluster
    ## @param operator.service.healthCheckNodePort Specifies the health check node port (numeric port number) for the service if `externalTrafficPolicy` is set to Local.
    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
    ##
    healthCheckNodePort: ""
    ## @param operator.service.labels Additional labels for Prometheus Operator service
    ##
    labels: {}
    ## @param operator.service.annotations Additional annotations for Prometheus Operator service
    ##
    annotations: {}
    ## @param operator.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
    ##
    extraPorts: []
    ## @param operator.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
    ## If "ClientIP", consecutive client requests will be directed to the same Pod
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
    ##
    sessionAffinity: None
    ## @param operator.service.sessionAffinityConfig Additional settings for the sessionAffinity
    ## sessionAffinityConfig:
    ##   clientIP:
    ##     timeoutSeconds: 300
    ##
    sessionAffinityConfig: {}
  ## Network Policies
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  networkPolicy:
    ## @param operator.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
    ##
    enabled: true
    ## @param operator.networkPolicy.allowExternal Don't require server label for connections
    ## The Policy model to apply. When set to false, only pods with the correct
    ## server label will have network access to the ports server is listening
    ## on. When true, server will accept connections from any source
    ## (with the correct destination port).
    ##
    allowExternal: true
    ## @param operator.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
    ##
    allowExternalEgress: true
    ## @param operator.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
    ## e.g:
    ## extraIngress:
    ##   - ports:
    ##       - port: 1234
    ##     from:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraIngress: []
    ## @param operator.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
    ## e.g:
    ## extraEgress:
    ##   - ports:
    ##       - port: 1234
    ##     to:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraEgress: []
    ## @param operator.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
    ## @param operator.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
    ##
    ingressNSMatchLabels: {}
    ingressNSPodMatchLabels: {}
  ## Create a servicemonitor for the operator
  ##
  serviceMonitor:
    ## @param operator.serviceMonitor.enabled Creates a ServiceMonitor to monitor Prometheus Operator
    ##
    enabled: true
    ## @param operator.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: ""
    ## @param operator.serviceMonitor.interval Scrape interval (use by default, falling back to Prometheus' default)
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    interval: ""
    ## @param operator.serviceMonitor.metricRelabelings Metric relabeling
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
    ##
    metricRelabelings: []
    ## @param operator.serviceMonitor.relabelings Relabel configs
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
    ##
    relabelings: []
    ## @param operator.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    scrapeTimeout: ""
    ## @param operator.serviceMonitor.labels Extra labels for the ServiceMonitor
    ##
    labels: {}
    ## @param operator.serviceMonitor.annotations Extra annotations for the ServiceMonitor
    ##
    annotations: {}
    ## @param operator.serviceMonitor.extraParameters Any extra parameter to be added to the endpoint configured in the ServiceMonitor
    ## (e.g. tlsConfig for further customization of the HTTPS behavior)
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Endpoint
    ##
    extraParameters: {}
    ## @param operator.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: ""
  ## @param operator.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if operator.resources is set (operator.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  resourcesPreset: "nano"
  ## @param operator.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ##
  resources: {}
  ## @param operator.podAffinityPreset Pod affinity preset
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param operator.podAntiAffinityPreset Prometheus Operator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Node affinity preset
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param operator.nodeAffinityPreset.type Prometheus Operator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param operator.nodeAffinityPreset.key Prometheus Operator Node label key to match. Ignored if `affinity` is set.
    ## E.g.
    ## key: "kubernetes.io/e2e-az-name"
    ##
    key: ""
    ## @param operator.nodeAffinityPreset.values Prometheus Operator Node label values to match. Ignored if `affinity` is set.
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param operator.affinity Prometheus Operator Affinity for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## Note: operator.podAffinityPreset, operator.podAntiAffinityPreset, and operator.nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param operator.nodeSelector Prometheus Operator Node labels for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  ##
  nodeSelector: {}
  ## @param operator.tolerations Prometheus Operator Tolerations for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param operator.podAnnotations Annotations for Prometheus Operator pods
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations: {}
  ## @param operator.podLabels Extra labels for Prometheus Operator pods
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
  ##
  podLabels: {}
  ## @param operator.priorityClassName Priority class assigned to the Pods
  ##
  priorityClassName: ""
  ## Configure extra options for liveness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param operator.livenessProbe.enabled Turn on and off liveness probe
  ## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
  ## @param operator.livenessProbe.periodSeconds How often to perform the probe
  ## @param operator.livenessProbe.timeoutSeconds When the probe times out
  ## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
  ## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
  ##
  livenessProbe:
    enabled: true
    initialDelaySeconds: 120
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for readiness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param operator.readinessProbe.enabled Turn on and off readiness probe
  ## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
  ## @param operator.readinessProbe.periodSeconds How often to perform the probe
  ## @param operator.readinessProbe.timeoutSeconds When the probe times out
  ## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
  ## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
  ##
  readinessProbe:
    enabled: true
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for startup probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param operator.startupProbe.enabled Turn on and off startup probe
  ## @param operator.startupProbe.initialDelaySeconds Delay before startup probe is initiated
  ## @param operator.startupProbe.periodSeconds How often to perform the probe
  ## @param operator.startupProbe.timeoutSeconds When the probe times out
  ## @param operator.startupProbe.failureThreshold Minimum consecutive failures for the probe
  ## @param operator.startupProbe.successThreshold Minimum consecutive successes for the probe
  ##
  startupProbe:
    enabled: false
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## @param operator.customLivenessProbe Custom livenessProbe that overrides the default one
  ##
  customLivenessProbe: {}
  ## @param operator.customReadinessProbe Custom readinessProbe that overrides the default one
  ##
  customReadinessProbe: {}
  ## @param operator.customStartupProbe Custom startupProbe that overrides the default one
  ##
  customStartupProbe: {}
  ## @param operator.logLevel Log level for Prometheus Operator
  ##
  logLevel: info
  ## @param operator.logFormat Log format for Prometheus Operator
  ##
  logFormat: logfmt
  ## @param operator.configReloaderResources Set the prometheus config reloader side-car CPU and memory requests and limits.
  ## configReloaderResources:
  ##   limits:
  ##     cpu: 200m
  ##     memory: 100Mi
  ##   requests:
  ##     cpu: 100m
  ##     memory: 50Mi
  ##
  configReloaderResources: {}
  ## @param operator.kubeletService.enabled If true, the operator will create and maintain a service for scraping kubelets
  ## @param operator.kubeletService.namespace Namespace to deploy the kubelet service
  ##
  kubeletService:
    enabled: true
    namespace: kube-system
  ## Prometheus Configmap-reload image to use for reloading configmaps
  ## defaults to Bitnami Prometheus Operator (ref: https://hub.docker.com/r/bitnami/prometheus-operator/tags/)
  ##
  prometheusConfigReloader:
    ## @param operator.prometheusConfigReloader.image Prometheus Config Reloader image. If not set, the same as `operator.image.registry`
    ## registry:
    ## repository:
    ## tag:
    ## digest: ""
    ## pullSecrets:
    ##
    image: {}
    ## Prometheus config reload container's securityContext
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
    ## @param operator.prometheusConfigReloader.containerSecurityContext.enabled Enabled containers' Security Context
    ## @param operator.prometheusConfigReloader.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
    ## @param operator.prometheusConfigReloader.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
    ## @param operator.prometheusConfigReloader.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
    ## @param operator.prometheusConfigReloader.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
    ## @param operator.prometheusConfigReloader.containerSecurityContext.privileged Set container's Security Context privileged
    ## @param operator.prometheusConfigReloader.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
    ## @param operator.prometheusConfigReloader.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
    ## @param operator.prometheusConfigReloader.containerSecurityContext.capabilities.drop List of capabilities to be dropped
    ## @param operator.prometheusConfigReloader.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
    ##
    containerSecurityContext:
      enabled: true
      seLinuxOptions: {}
      runAsUser: 1001
      runAsGroup: 1001
      runAsNonRoot: true
      privileged: false
      readOnlyRootFilesystem: true
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
      seccompProfile:
        type: "RuntimeDefault"
    ## Configure extra options for liveness probe
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
    ## @param operator.prometheusConfigReloader.livenessProbe.enabled Turn on and off liveness probe
    ## @param operator.prometheusConfigReloader.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
    ## @param operator.prometheusConfigReloader.livenessProbe.periodSeconds How often to perform the probe
    ## @param operator.prometheusConfigReloader.livenessProbe.timeoutSeconds When the probe times out
    ## @param operator.prometheusConfigReloader.livenessProbe.failureThreshold Minimum consecutive failures for the probe
    ## @param operator.prometheusConfigReloader.livenessProbe.successThreshold Minimum consecutive successes for the probe
    ##
    livenessProbe:
      enabled: true
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1
    ## Configure extra options for readiness probe
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
    ## @param operator.prometheusConfigReloader.readinessProbe.enabled Turn on and off readiness probe
    ## @param operator.prometheusConfigReloader.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
    ## @param operator.prometheusConfigReloader.readinessProbe.periodSeconds How often to perform the probe
    ## @param operator.prometheusConfigReloader.readinessProbe.timeoutSeconds When the probe times out
    ## @param operator.prometheusConfigReloader.readinessProbe.failureThreshold Minimum consecutive failures for the probe
    ## @param operator.prometheusConfigReloader.readinessProbe.successThreshold Minimum consecutive successes for the probe
    ##
    readinessProbe:
      enabled: true
      initialDelaySeconds: 15
      periodSeconds: 20
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1
  ## Restrict the namespaces that the operator watches
  ## ref: `-namespaces` in https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/operator.md
  ## @param operator.namespaces Optional comma-separated list of namespaces to watch (default=all).
  ##
  namespaces: ""
  ## Pod Disruption Budget configuration
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
  ## @param operator.pdb.create Enable/disable a Pod Disruption Budget creation
  ## @param operator.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
  ## @param operator.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable
  ##
  pdb:
    create: true
    minAvailable: ""
    maxUnavailable: ""
## @section Prometheus Parameters
##
## Deploy a Prometheus instance
##
prometheus:
## @param prometheus.enabled Deploy Prometheus to the cluster
##
enabled: true
## Bitnami Prometheus image version
## ref: https://hub.docker.com/r/bitnami/prometheus/tags/
## @param prometheus.image.registry [default: REGISTRY_NAME] Prometheus image registry
## @param prometheus.image.repository [default: REPOSITORY_NAME/prometheus] Prometheus image repository
## @skip prometheus.image.tag Prometheus image tag (immutable tags are recommended)
## @param prometheus.image.digest Prometheus image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param prometheus.image.pullPolicy Prometheus image pull policy
## @param prometheus.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/prometheus
tag: 3.4.1-debian-12-r2
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param prometheus.defaultRules.create Create default rules for Prometheus
## @param prometheus.defaultRules.rules [object] Set of default rules for Prometheus that can be enabled/disabled
##
defaultRules:
create: true
rules:
alertmanager: true
etcd: true
configReloaders: true
general: true
k8sContainerCpuUsageSecondsTotal: true
k8sContainerMemoryCache: true
k8sContainerMemoryRss: true
k8sContainerMemorySwap: true
k8sContainerResource: true
k8sContainerMemoryWorkingSetBytes: true
k8sPodOwner: true
kubeApiserverAvailability: true
kubeApiserverBurnrate: true
kubeApiserverHistogram: true
kubeApiserverSlos: true
kubeControllerManager: true
kubelet: true
kubeProxy: true
kubePrometheusGeneral: true
kubePrometheusNodeRecording: true
kubernetesApps: true
kubernetesResources: true
kubernetesStorage: true
kubernetesSystem: true
kubeSchedulerAlerting: true
kubeSchedulerRecording: true
kubeStateMetrics: true
network: true
node: true
nodeExporterAlerting: true
nodeExporterRecording: true
prometheus: true
prometheusOperator: true
## Service account for Prometheus to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
## @param prometheus.serviceAccount.create Specify whether to create a ServiceAccount for Prometheus
##
create: true
## @param prometheus.serviceAccount.name The name of the ServiceAccount to create
## If not set and create is true, a name is generated using the kube-prometheus.prometheus.fullname template
##
name: ""
## @param prometheus.serviceAccount.annotations Additional annotations for created Prometheus ServiceAccount
## annotations:
## eks.amazonaws.com/role-arn: arn:aws:iam::ACCOUNT:role/prometheus
##
annotations: {}
## @param prometheus.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
##
automountServiceAccountToken: false
## Prometheus pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param prometheus.podSecurityContext.enabled Enable security context
## @param prometheus.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param prometheus.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param prometheus.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param prometheus.podSecurityContext.fsGroup Group ID for the container filesystem
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Prometheus containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param prometheus.containerSecurityContext.enabled Enabled containers' Security Context
## @param prometheus.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param prometheus.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param prometheus.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param prometheus.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param prometheus.containerSecurityContext.privileged Set container's Security Context privileged
## @param prometheus.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param prometheus.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param prometheus.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param prometheus.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## Configure pod disruption budgets for Prometheus
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
## @param prometheus.pdb.create Create a pod disruption budget for Prometheus
## @param prometheus.pdb.minAvailable Minimum number / percentage of pods that should remain scheduled
## @param prometheus.pdb.maxUnavailable Maximum number / percentage of pods that may be made unavailable
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param prometheus.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param prometheus.networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports server is listening
## on. When true, server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param prometheus.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
##
allowExternalEgress: true
## @param prometheus.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param prometheus.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param prometheus.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param prometheus.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## Prometheus Service
##
service:
## @param prometheus.service.type Kubernetes service type
##
type: ClusterIP
## @param prometheus.service.ports.http Prometheus service port
##
ports:
http: 9090
## @param prometheus.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` for headless service
## e.g: clusterIP: None
##
clusterIP: ""
## @param prometheus.service.nodePorts.http Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## e.g:
## nodePort: 30090
##
nodePorts:
http: ""
## @param prometheus.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param prometheus.service.loadBalancerClass Prometheus service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerClass: ""
## @param prometheus.service.loadBalancerSourceRanges Addresses that are allowed when service is `LoadBalancer`
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param prometheus.service.externalTrafficPolicy Enable client source IP preservation
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
## There are two available options: Cluster (default) and Local
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param prometheus.service.healthCheckNodePort Specifies the health check node port
## if externalTrafficPolicy is set to Local.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
healthCheckNodePort: ""
## @param prometheus.service.labels Additional labels for Prometheus service (this value is evaluated as a template)
##
labels: {}
## @param prometheus.service.annotations Additional annotations for Prometheus service (this value is evaluated as a template)
##
annotations: {}
## @param prometheus.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param prometheus.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
serviceMonitor:
## @param prometheus.serviceMonitor.enabled Creates a ServiceMonitor to monitor Prometheus itself
##
enabled: true
## @param prometheus.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
##
jobLabel: ""
## @param prometheus.serviceMonitor.interval Scrape interval (used by default, falling back to Prometheus' default)
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param prometheus.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param prometheus.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## @param prometheus.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: ""
## Configure the ingress resource that allows you to access the
## Prometheus installation. Set up the URL
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
## @param prometheus.ingress.enabled Enable ingress controller resource
##
enabled: false
## @param prometheus.ingress.pathType Ingress Path type
##
pathType: ImplementationSpecific
## @param prometheus.ingress.apiVersion Override API Version (automatically detected if not set)
##
apiVersion: ""
## @param prometheus.ingress.hostname Default host for the ingress resource
##
hostname: prometheus.local
## @param prometheus.ingress.path The Path to Prometheus. You may need to set this to '/*' in order to use this with ALB ingress controllers
##
path: /
## @param prometheus.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param prometheus.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
##
ingressClassName: ""
## @param prometheus.ingress.tls Enable TLS configuration for the hostname defined at prometheus.ingress.hostname parameter
## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.prometheus.ingress.hostname }}
## You can use the prometheus.ingress.secrets parameter to create this TLS secret or relay on cert-manager to create it
##
tls: false
## @param prometheus.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
##
selfSigned: false
## @param prometheus.ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: prometheus.local
## path: /
##
extraHosts: []
## @param prometheus.ingress.extraPaths Additional arbitrary path/backend objects
## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param prometheus.ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - prometheus.local
## secretName: prometheus.local-tls
##
extraTls: []
## @param prometheus.ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## e.g:
## secrets:
## - name: prometheus.local-tls
## key:
## certificate:
##
## NOTE: the secret name MUST match {{ingress.hostname}}-tls to be used if selfSigned is false or no certManager is used
secrets: []
## @param prometheus.ingress.extraRules Additional rules to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
## e.g:
## extraRules:
## - host: example.local
## http:
## path: /
## backend:
## service:
## name: example-svc
## port:
## name: http
##
extraRules: []
## @param prometheus.externalUrl External URL used to access Prometheus
## If not creating an ingress but still exposing the service some other way (like a proxy)
## let Prometheus know what its external URL is so that it can properly create links
## externalUrl: https://prometheus.example.com
##
externalUrl: ""
## @param prometheus.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if prometheus.resources is set (prometheus.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "small"
## @param prometheus.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
##
resources: {}
## @param prometheus.podAffinityPreset Prometheus Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param prometheus.podAntiAffinityPreset Prometheus Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param prometheus.nodeAffinityPreset.type Prometheus Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param prometheus.nodeAffinityPreset.key Prometheus Node label key to match. Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param prometheus.nodeAffinityPreset.values Prometheus Node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param prometheus.affinity Prometheus Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: prometheus.podAffinityPreset, prometheus.podAntiAffinityPreset, and prometheus.nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param prometheus.nodeSelector Prometheus Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param prometheus.topologySpreadConstraints Prometheus Topology Spread Constraints for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
## The value is evaluated as a template
##
topologySpreadConstraints: []
## @param prometheus.tolerations Prometheus Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param prometheus.scrapeInterval Interval between consecutive scrapes
##
scrapeInterval: ""
## @param prometheus.evaluationInterval Interval between consecutive evaluations
##
evaluationInterval: ""
## @param prometheus.scrapeTimeout Timeout after which the global scrape is ended
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
##
scrapeTimeout: ""
## @param prometheus.sampleLimit Per-scrape max number of scraped samples. Requires Prometheus v2.45.0 and newer
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
sampleLimit: ""
## @param prometheus.enforcedSampleLimit Override sampleLimits set by ServiceMonitor, PodMonitor or Probe objects
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.PrometheusSpec
enforcedSampleLimit: ""
## @param prometheus.keepDroppedTargets Limit per scrape config on the number of targets dropped by relabeling that will be kept in memory. 0 means no limit.
## Requires Prometheus v2.47.0 and newer
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
keepDroppedTargets: ""
## @param prometheus.listenLocal ListenLocal makes the Prometheus server listen on loopback
##
listenLocal: false
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param prometheus.livenessProbe.enabled Turn on and off liveness probe
## @param prometheus.livenessProbe.path Path of the HTTP service for checking the healthy state
## @param prometheus.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param prometheus.livenessProbe.periodSeconds How often to perform the probe
## @param prometheus.livenessProbe.timeoutSeconds When the probe times out
## @param prometheus.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param prometheus.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /-/healthy
initialDelaySeconds: 0
failureThreshold: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param prometheus.readinessProbe.enabled Turn on and off readiness probe
## @param prometheus.readinessProbe.path Path of the HTTP service for checking the ready state
## @param prometheus.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param prometheus.readinessProbe.periodSeconds How often to perform the probe
## @param prometheus.readinessProbe.timeoutSeconds When the probe times out
## @param prometheus.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param prometheus.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /-/ready
initialDelaySeconds: 0
failureThreshold: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
## Configure extra options for startup probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param prometheus.startupProbe.enabled Turn on and off startup probe
## @param prometheus.startupProbe.path Path of the HTTP service for checking the ready state
## @param prometheus.startupProbe.initialDelaySeconds Delay before startup probe is initiated
## @param prometheus.startupProbe.periodSeconds How often to perform the probe
## @param prometheus.startupProbe.timeoutSeconds When the probe times out
## @param prometheus.startupProbe.failureThreshold Minimum consecutive failures for the probe
## @param prometheus.startupProbe.successThreshold Minimum consecutive successes for the probe
##
startupProbe:
enabled: true
path: /-/ready
initialDelaySeconds: 0
failureThreshold: 60
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 3
## @param prometheus.enableAdminAPI Enable Prometheus administrative API
## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
##
enableAdminAPI: false
## @param prometheus.enableFeatures Enable access to Prometheus disabled features.
## ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
##
enableFeatures: []
## @param prometheus.alertingEndpoints Alertmanagers to which alerts will be sent
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
##
alertingEndpoints: []
## @param prometheus.externalLabels External labels to add to any time series or alerts when communicating with external systems
##
externalLabels: {}
## @param prometheus.replicaExternalLabelName Name of the external label used to denote replica name
##
replicaExternalLabelName: ""
## @param prometheus.replicaExternalLabelNameClear Clear external label used to denote replica name
##
replicaExternalLabelNameClear: false
## @param prometheus.routePrefix Prefix used to register routes, overriding externalUrl route
## Useful for proxies that rewrite URLs.
##
routePrefix: /
## @param prometheus.prometheusExternalLabelName Name of the external label used to denote Prometheus instance name
##
prometheusExternalLabelName: ""
## @param prometheus.prometheusExternalLabelNameClear Clear external label used to denote Prometheus instance name
##
prometheusExternalLabelNameClear: false
## @param prometheus.secrets Secrets that should be mounted into the Prometheus Pods
##
secrets: []
## @param prometheus.configMaps ConfigMaps that should be mounted into the Prometheus Pods
##
configMaps: []
## @param prometheus.querySpec The query command line flags when starting Prometheus
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#queryspec
##
querySpec: {}
## @param prometheus.ruleNamespaceSelector Namespaces to be selected for PrometheusRules discovery
## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
##
ruleNamespaceSelector: {}
## @param prometheus.ruleSelector PrometheusRules to be selected for target discovery
## If {}, select all ServiceMonitors
##
ruleSelector: {}
## @param prometheus.serviceMonitorSelector ServiceMonitors to be selected for target discovery
## If {}, select all ServiceMonitors
##
serviceMonitorSelector: {}
## @param prometheus.serviceMonitorNamespaceSelector Namespaces to be selected for ServiceMonitor discovery
## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
##
serviceMonitorNamespaceSelector: {}
## @param prometheus.podMonitorSelector PodMonitors to be selected for target discovery.
## If {}, select all PodMonitors
##
podMonitorSelector: {}
## @param prometheus.podMonitorNamespaceSelector Namespaces to be selected for PodMonitor discovery
## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
##
podMonitorNamespaceSelector: {}
## @param prometheus.probeSelector Probes to be selected for target discovery.
## If {}, select all Probes
##
probeSelector: {}
## @param prometheus.probeNamespaceSelector Namespaces to be selected for Probe discovery
## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
##
probeNamespaceSelector: {}
## @param prometheus.scrapeConfigSelector ScrapeConfig to be selected for target discovery.
## If {}, select all ScrapeConfig
##
scrapeConfigSelector: {}
## @param prometheus.scrapeConfigNamespaceSelector Namespaces to be selected for ScrapeConfig discovery
## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
##
scrapeConfigNamespaceSelector: {}
## @param prometheus.scrapeClasses List of scrape classes to expose to scraping objects
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#scrapeclass for usage
##
scrapeClasses: []
## @param prometheus.retention Metrics retention days
##
retention: 10d
## @param prometheus.retentionSize Maximum size of metrics
##
retentionSize: ""
## @param prometheus.disableCompaction Disable the compaction of the Prometheus TSDB
## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
## ref: https://prometheus.io/docs/prometheus/latest/storage/#compaction
##
disableCompaction: false
## @param prometheus.walCompression Enable compression of the write-ahead log using Snappy
##
walCompression: false
## @param prometheus.paused If true, the Operator won't process any Prometheus configuration changes
##
paused: false
## @param prometheus.replicaCount Number of Prometheus replicas desired
##
replicaCount: 1
## @param prometheus.shards Number of Prometheus shards desired
##
shards: 1
## @param prometheus.logLevel Log level for Prometheus
##
logLevel: info
## @param prometheus.logFormat Log format for Prometheus
##
logFormat: logfmt
## @param prometheus.nameValidationScheme Specifies the validation scheme for metric and label names
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.NameValidationSchemeOptions
##
nameValidationScheme: "UTF8"
## @param prometheus.podMetadata [object] Standard object's metadata
## ref: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
##
podMetadata:
## labels:
## app: prometheus
## k8s-app: prometheus
##
labels: {}
annotations: {}
## @param prometheus.remoteRead The remote_read spec configuration for Prometheus
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
## remoteRead:
## - url: http://remote1/read
##
remoteRead: []
## @param prometheus.remoteWrite The remote_write spec configuration for Prometheus
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
## remoteWrite:
## - url: http://remote1/push
##
remoteWrite: []
## @param prometheus.enableRemoteWriteReceiver Enable Prometheus to be used as a receiver for the Prometheus remote write protocol.
##
enableRemoteWriteReceiver: false
## @param prometheus.storageSpec Prometheus StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
##
storageSpec: {}
## Prometheus persistence parameters
##
persistence:
## @param prometheus.persistence.enabled Use PVCs to persist data. If the storageSpec is provided this will not take effect.
##
enabled: false
## @param prometheus.persistence.storageClass Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner.
##
storageClass: ""
## @param prometheus.persistence.accessModes Persistent Volume Access Modes
##
accessModes:
- ReadWriteOnce
## @param prometheus.persistence.size Persistent Volume Size
##
size: 8Gi
## @param prometheus.persistence.annotations Persistent Volume Claim annotations
##
annotations: {}
## @param prometheus.priorityClassName Priority class assigned to the Pods
##
priorityClassName: ""
## @param prometheus.containers Containers allows injecting additional containers
##
containers: []
## @param prometheus.initContainers Add additional init containers to the prometheus pod(s)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
## e.g:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## command: ['sh', '-c', 'echo "hello world"']
##
initContainers: []
## @param prometheus.volumes Volumes allows configuration of additional volumes
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
##
volumes: []
## @param prometheus.volumeMounts VolumeMounts allows configuration of additional VolumeMounts. Evaluated as a template
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
##
volumeMounts: []
## @param prometheus.additionalPrometheusRules PrometheusRule defines recording and alerting rules for a Prometheus instance.
## - name: custom-recording-rules
## groups:
## - name: sum_node_by_job
## rules:
## - record: job:kube_node_labels:sum
## expr: sum(kube_node_labels) by (job)
## - name: sum_prometheus_config_reload_by_pod
## rules:
## - record: job:prometheus_config_last_reload_successful:sum
## expr: sum(prometheus_config_last_reload_successful) by (pod)
## - name: custom-alerting-rules
## groups:
## - name: prometheus-config
## rules:
## - alert: PrometheusConfigurationReload
## expr: prometheus_config_last_reload_successful > 0
## for: 1m
## labels:
## severity: error
## annotations:
## summary: "Prometheus configuration reload (instance {{ $labels.instance }})"
## description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
## - name: custom-node-exporter-alerting-rules
## rules:
## - alert: PhysicalComponentTooHot
## expr: node_hwmon_temp_celsius > 75
## for: 5m
## labels:
## severity: warning
## annotations:
## summary: "Physical component too hot (instance {{ $labels.instance }})"
## description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
## - alert: NodeOvertemperatureAlarm
## expr: node_hwmon_temp_alarm == 1
## for: 5m
## labels:
## severity: critical
## annotations:
## summary: "Node overtemperature alarm (instance {{ $labels.instance }})"
## description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
##
## @param prometheus.additionalArgs Allows setting additional arguments for the Prometheus container
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
##
additionalArgs: []
additionalPrometheusRules: []
## Note that the prometheus will fail to provision if the correct secret does not exist.
## @param prometheus.additionalScrapeConfigs.enabled Enable additional scrape configs
## @param prometheus.additionalScrapeConfigs.type Indicates if the chart should use external additional scrape configs or internal configs
## @param prometheus.additionalScrapeConfigs.external.name Name of the secret that Prometheus should use for the additional external scrape configuration
## @param prometheus.additionalScrapeConfigs.external.key Name of the key inside the secret to be used for the additional external scrape configuration
## @param prometheus.additionalScrapeConfigs.internal.jobList A list of Prometheus scrape jobs
##
additionalScrapeConfigs:
enabled: false
type: external
external:
## Name of the secret that Prometheus should use for the additional scrape configuration
##
name: ""
## Name of the key inside the secret to be used for the additional scrape configuration.
##
key: ""
internal:
jobList: []
## Enable additional Prometheus alert relabel configs that are managed externally to this chart
## Note that the prometheus will fail to provision if the correct secret does not exist.
## @param prometheus.additionalAlertRelabelConfigsExternal.enabled Enable additional Prometheus alert relabel configs that are managed externally to this chart
## @param prometheus.additionalAlertRelabelConfigsExternal.name Name of the secret that Prometheus should use for the additional Prometheus alert relabel configuration
## @param prometheus.additionalAlertRelabelConfigsExternal.key Name of the key inside the secret to be used for the additional Prometheus alert relabel configuration
##
additionalAlertRelabelConfigsExternal:
enabled: false
name: ""
key: ""
## Enable additional Prometheus AlertManager configs that are managed externally to this chart
## Note that the prometheus will fail to provision if the correct secret does not exist.
## @param prometheus.additionalAlertManagerExternal.enabled Enable additional Prometheus AlertManager configs that are managed externally to this chart
## @param prometheus.additionalAlertManagerExternal.name Name of the secret that Prometheus should use for the additional Prometheus AlertManager configuration
## @param prometheus.additionalAlertManagerExternal.key Name of the key inside the secret to be used for the additional Prometheus AlertManager configuration
##
additionalAlertManagerExternal:
enabled: false
name: ""
key: ""
## Thanos sidecar container configuration
##
thanos:
## @param prometheus.thanos.create Create a Thanos sidecar container
##
create: false
## Bitnami Thanos image
## ref: https://hub.docker.com/r/bitnami/thanos/tags/
## @param prometheus.thanos.image.registry [default: REGISTRY_NAME] Thanos image registry
## @param prometheus.thanos.image.repository [default: REPOSITORY_NAME/thanos] Thanos image name
## @skip prometheus.thanos.image.tag Thanos image tag
## @param prometheus.thanos.image.digest Thanos image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param prometheus.thanos.image.pullPolicy Thanos image pull policy
## @param prometheus.thanos.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/thanos
tag: 0.39.0-debian-12-r0
digest: ""
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Thanos Sidecar container's securityContext
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param prometheus.thanos.containerSecurityContext.enabled Enabled containers' Security Context
## @param prometheus.thanos.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param prometheus.thanos.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param prometheus.thanos.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param prometheus.thanos.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param prometheus.thanos.containerSecurityContext.privileged Set container's Security Context privileged
## @param prometheus.thanos.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param prometheus.thanos.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param prometheus.thanos.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param prometheus.thanos.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param prometheus.thanos.containerPorts.grpc Thanos grpc port
## @param prometheus.thanos.containerPorts.http Thanos http port
##
containerPorts:
grpc: 10901
http: 10902
## @param prometheus.thanos.prometheusUrl Override default prometheus url `http://localhost:9090`
##
prometheusUrl: ""
## @param prometheus.thanos.extraArgs Additional arguments passed to the thanos sidecar container
## extraArgs:
## - --log.level=debug
## - --tsdb.path=/data/
##
extraArgs: []
## @param prometheus.thanos.objectStorageConfig.secretName Support mounting a Secret for the objectStorageConfig of the sideCar container.
## @param prometheus.thanos.objectStorageConfig.secretKey Secret key with the configuration file.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/thanos.md
## objectStorageConfig:
## secretName: thanos-objstore-config
## secretKey: thanos.yaml
##
objectStorageConfig:
secretName: ""
secretKey: thanos.yaml
## @param prometheus.thanos.extraEnvVars Array with extra environment variables to add to the thanos sidecar container
## For example:
## extraEnvVars:
## - name: REQUEST_LOGGING_CONFIG
## valueFrom:
## secretKeyRef:
## name: thanos-request-logging-config
## key: request-logging-config.yml
##
extraEnvVars: []
## @param prometheus.thanos.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for the thanos sidecar container
##
extraEnvVarsCM: ""
## @param prometheus.thanos.extraEnvVarsSecret Name of existing Secret containing extra env vars for the thanos sidecar container
##
extraEnvVarsSecret: ""
## ref: https://github.com/thanos-io/thanos/blob/main/docs/components/sidecar.md
## @param prometheus.thanos.extraVolumeMounts Additional volumeMounts from `prometheus.volumes` for thanos sidecar container
## extraVolumeMounts:
## - name: my-secret-volume
## mountPath: /etc/thanos/secrets/my-secret
##
extraVolumeMounts: []
## Thanos sidecar container resource requests and limits.
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param prometheus.thanos.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if prometheus.thanos.resources is set (prometheus.thanos.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param prometheus.thanos.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param prometheus.thanos.livenessProbe.enabled Turn on and off liveness probe
## @param prometheus.thanos.livenessProbe.path Path of the HTTP service for checking the healthy state
## @param prometheus.thanos.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param prometheus.thanos.livenessProbe.periodSeconds How often to perform the probe
## @param prometheus.thanos.livenessProbe.timeoutSeconds When the probe times out
## @param prometheus.thanos.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param prometheus.thanos.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /-/healthy
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 120
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param prometheus.thanos.readinessProbe.enabled Turn on and off readiness probe
## @param prometheus.thanos.readinessProbe.path Path of the HTTP service for checking the ready state
## @param prometheus.thanos.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param prometheus.thanos.readinessProbe.periodSeconds How often to perform the probe
## @param prometheus.thanos.readinessProbe.timeoutSeconds When the probe times out
## @param prometheus.thanos.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param prometheus.thanos.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /-/ready
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 120
successThreshold: 1
## Thanos Sidecar Service
##
service:
## @param prometheus.thanos.service.type Kubernetes service type
##
type: ClusterIP
## @param prometheus.thanos.service.ports.grpc Thanos service port
##
ports:
grpc: 10901
## @param prometheus.thanos.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` to create headless service by default.
## Use a "headless" service by default so it returns every pod's IP instead of loadbalancing requests.
##
clusterIP: None
## @param prometheus.thanos.service.nodePorts.grpc Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## e.g:
## nodePort: 30901
##
nodePorts:
grpc: ""
## @param prometheus.thanos.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param prometheus.thanos.service.loadBalancerClass Thanos service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerClass: ""
## @param prometheus.thanos.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param prometheus.thanos.service.labels Additional labels for Thanos service
##
labels: {}
## @param prometheus.thanos.service.annotations Additional annotations for Thanos service
##
annotations: {}
## @param prometheus.thanos.service.extraPorts Additional ports to expose from the Thanos sidecar container
## extraPorts:
## - name: http
## port: 10902
## targetPort: http
## protocol: TCP
##
extraPorts: []
## @param prometheus.thanos.service.externalTrafficPolicy Prometheus service external traffic policy
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param prometheus.thanos.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param prometheus.thanos.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Configure the ingress resource that allows you to access the
## Thanos Sidecar installation. Set up the URL
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
## @param prometheus.thanos.ingress.enabled Enable ingress controller resource
##
enabled: false
## @param prometheus.thanos.ingress.pathType Ingress path type
##
pathType: ImplementationSpecific
## @param prometheus.thanos.ingress.apiVersion Force Ingress API version (automatically detected if not set)
##
apiVersion: ""
## @param prometheus.thanos.ingress.hostname Default host for the ingress record
##
hostname: thanos.prometheus.local
## @param prometheus.thanos.ingress.path Default path for the ingress record
## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
##
path: /
## @param prometheus.thanos.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## Examples:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
      ## @param prometheus.thanos.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
      ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
##
ingressClassName: ""
## @param prometheus.thanos.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
## You can:
## - Use the `ingress.secrets` parameter to create this TLS secret
      ## - Rely on cert-manager to create it by setting `ingress.certManager=true`
      ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
##
tls: false
## @param prometheus.thanos.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
##
selfSigned: false
## @param prometheus.thanos.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
## e.g:
## extraHosts:
## - name: thanos.prometheus.local
## path: /
##
extraHosts: []
## @param prometheus.thanos.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
## e.g:
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param prometheus.thanos.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## e.g:
## extraTls:
## - hosts:
## - thanos.prometheus.local
## secretName: thanos.prometheus.local-tls
##
extraTls: []
## @param prometheus.thanos.ingress.secrets Custom TLS certificates as secrets
## NOTE: 'key' and 'certificate' are expected in PEM format
## NOTE: 'name' should line up with a 'secretName' set further up
## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## e.g:
## secrets:
## - name: thanos.prometheus.local-tls
## key: |-
## -----BEGIN RSA PRIVATE KEY-----
## ...
## -----END RSA PRIVATE KEY-----
## certificate: |-
## -----BEGIN CERTIFICATE-----
## ...
## -----END CERTIFICATE-----
##
secrets: []
## @param prometheus.thanos.ingress.extraRules The list of additional rules to be added to this ingress record. Evaluated as a template
## Useful when looking for additional customization, such as using different backend
##
extraRules: []
## config-reloader sidecar container configuration
##
configReloader:
## config-reloader sidecar Service
##
service:
## @param prometheus.configReloader.service.enabled Enable config-reloader sidecar service
##
enabled: false
## @param prometheus.configReloader.service.type Kubernetes service type
##
type: ClusterIP
## @param prometheus.configReloader.service.ports.http config-reloader sidecar container service port
##
ports:
http: 8080
## @param prometheus.configReloader.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` to create headless service by default.
## Use a "headless" service by default so it returns every pod's IP instead of loadbalancing requests.
##
clusterIP: None
## @param prometheus.configReloader.service.nodePorts.http Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## e.g:
## nodePort: 30901
##
nodePorts:
http: ""
## @param prometheus.configReloader.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param prometheus.configReloader.service.loadBalancerClass Prometheus Config Reloader service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerClass: ""
## @param prometheus.configReloader.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param prometheus.configReloader.service.labels Additional labels for Prometheus service
##
labels: {}
## @param prometheus.configReloader.service.annotations Additional annotations for Prometheus service
##
annotations: {}
## @param prometheus.configReloader.service.extraPorts Additional ports to expose from the config-reloader sidecar container
## extraPorts:
## - name: http
## port: 10902
## targetPort: http
## protocol: TCP
##
extraPorts: []
## @param prometheus.configReloader.service.externalTrafficPolicy Prometheus service external traffic policy
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param prometheus.configReloader.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param prometheus.configReloader.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Create a ServiceMonitor to monitor Prometheus config-reloader sidecar
##
serviceMonitor:
## @param prometheus.configReloader.serviceMonitor.enabled Creates a ServiceMonitor to monitor Prometheus config-reloader sidecar
##
enabled: false
      ## @param prometheus.configReloader.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param prometheus.configReloader.serviceMonitor.path HTTP path to scrape for metrics
##
path: /metrics
## @param prometheus.configReloader.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
##
jobLabel: ""
## @param prometheus.configReloader.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param prometheus.configReloader.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## @param prometheus.configReloader.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: ""
## @param prometheus.portName Port name used for the pods and governing service. This defaults to web
##
portName: web
## @section Alertmanager Parameters
##
## Configuration for alertmanager
## ref: https://prometheus.io/docs/alerting/alertmanager/
##
alertmanager:
## @param alertmanager.enabled Deploy Alertmanager to the cluster
##
enabled: true
## Bitnami Alertmanager image version
## ref: https://hub.docker.com/r/bitnami/alertmanager/tags/
## @param alertmanager.image.registry [default: REGISTRY_NAME] Alertmanager image registry
## @param alertmanager.image.repository [default: REPOSITORY_NAME/alertmanager] Alertmanager image repository
## @skip alertmanager.image.tag Alertmanager image tag (immutable tags are recommended)
## @param alertmanager.image.digest Alertmanager image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param alertmanager.image.pullPolicy Alertmanager image pull policy
## @param alertmanager.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/alertmanager
tag: 0.28.1-debian-12-r11
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Service account for Alertmanager to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
## @param alertmanager.serviceAccount.create Specify whether to create a ServiceAccount for Alertmanager
##
create: true
## @param alertmanager.serviceAccount.name The name of the ServiceAccount to create
## If not set and create is true, a name is generated using the kube-prometheus.alertmanager.fullname template
##
name: ""
## @param alertmanager.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
##
automountServiceAccountToken: false
## @param alertmanager.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
##
annotations: {}
## Prometheus Alertmanager pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param alertmanager.podSecurityContext.enabled Enable security context
## @param alertmanager.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param alertmanager.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param alertmanager.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param alertmanager.podSecurityContext.fsGroup Group ID for the container filesystem
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Prometheus Alertmanager container's securityContext
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param alertmanager.containerSecurityContext.enabled Enabled containers' Security Context
## @param alertmanager.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param alertmanager.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param alertmanager.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param alertmanager.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param alertmanager.containerSecurityContext.privileged Set container's Security Context privileged
## @param alertmanager.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param alertmanager.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param alertmanager.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param alertmanager.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## Configure pod disruption budgets for Alertmanager
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
## @param alertmanager.pdb.create Create a pod disruption budget for Alertmanager
## @param alertmanager.pdb.minAvailable Minimum number / percentage of pods that should remain scheduled
## @param alertmanager.pdb.maxUnavailable Maximum number / percentage of pods that may be made unavailable
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## Alertmanager Service
##
service:
## @param alertmanager.service.type Kubernetes service type
##
type: ClusterIP
## @param alertmanager.service.ports.http Alertmanager service port
##
ports:
http: 9093
## @param alertmanager.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` for headless service
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param alertmanager.service.nodePorts.http Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## e.g:
## nodePort: 30903
##
nodePorts:
http: ""
## @param alertmanager.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param alertmanager.service.loadBalancerClass Alertmanager service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerClass: ""
## @param alertmanager.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param alertmanager.service.externalTrafficPolicy Enable client source IP preservation
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
## There are two available options: Cluster (default) and Local.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param alertmanager.service.healthCheckNodePort Specifies the health check node port
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
healthCheckNodePort: ""
## @param alertmanager.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
##
extraPorts: []
## @param alertmanager.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param alertmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## @param alertmanager.service.annotations Additional annotations for Alertmanager service (this value is evaluated as a template)
##
annotations: {}
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param alertmanager.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param alertmanager.networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports server is listening
## on. When true, server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param alertmanager.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
##
allowExternalEgress: true
## @param alertmanager.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
    ## @param alertmanager.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param alertmanager.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param alertmanager.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## If true, create a serviceMonitor for alertmanager
##
serviceMonitor:
## @param alertmanager.serviceMonitor.enabled Creates a ServiceMonitor to monitor Alertmanager
##
enabled: true
## @param alertmanager.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param alertmanager.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param alertmanager.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## @param alertmanager.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
##
jobLabel: ""
## @param alertmanager.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
scrapeTimeout: ""
## @param alertmanager.serviceMonitor.selector ServiceMonitor selector labels
## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
##
## selector:
## prometheus: my-prometheus
##
selector: {}
## @param alertmanager.serviceMonitor.labels Extra labels for the ServiceMonitor
##
labels: {}
## @param alertmanager.serviceMonitor.annotations Extra annotations for the ServiceMonitor
##
annotations: {}
## @param alertmanager.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
##
honorLabels: false
## @param alertmanager.serviceMonitor.extraParameters Any extra parameter to be added to the endpoint configured in the ServiceMonitor
## (e.g. tlsConfig for further customization of the HTTPS behavior)
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Endpoint
##
extraParameters: {}
## @param alertmanager.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: ""
## Configure the ingress resource that allows you to access the
## Alertmanager installation. Set up the URL
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
## @param alertmanager.ingress.enabled Enable ingress controller resource
##
enabled: false
## @param alertmanager.ingress.pathType Ingress Path type
##
pathType: ImplementationSpecific
## @param alertmanager.ingress.apiVersion Override API Version (automatically detected if not set)
##
apiVersion: ""
## @param alertmanager.ingress.hostname Default host for the ingress resource
##
hostname: alertmanager.local
## @param alertmanager.ingress.path The Path to Alert Manager. You may need to set this to '/*' in order to use this with ALB ingress controllers.
##
path: /
## @param alertmanager.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param alertmanager.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
##
ingressClassName: ""
## @param alertmanager.ingress.tls Enable TLS configuration for the hostname defined at alertmanager.ingress.hostname parameter
## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.alertmanager.ingress.hostname }}
## You can use the alertmanager.ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
##
tls: false
## @param alertmanager.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
##
selfSigned: false
## @param alertmanager.ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: alertmanager.local
## path: /
##
extraHosts: []
## @param alertmanager.ingress.extraPaths Additional arbitrary path/backend objects
## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param alertmanager.ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - alertmanager.local
## secretName: alertmanager.local-tls
##
extraTls: []
## @param alertmanager.ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## secrets:
## - name: alertmanager.local-tls
## key:
## certificate:
##
secrets: []
## @param alertmanager.ingress.extraRules Additional rules to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
## e.g:
## extraRules:
## - host: example.local
## http:
## path: /
## backend:
## service:
## name: example-svc
## port:
## name: http
##
extraRules: []
## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features.
##
enableFeatures: []
## @param alertmanager.externalUrl External URL used to access Alertmanager
## e.g:
## externalUrl: https://alertmanager.example.com
##
externalUrl: ""
## @param alertmanager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if alertmanager.resources is set (alertmanager.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param alertmanager.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
##
resources: {}
## @param alertmanager.podAffinityPreset Alertmanager Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param alertmanager.podAntiAffinityPreset Alertmanager Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param alertmanager.nodeAffinityPreset.type Alertmanager Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param alertmanager.nodeAffinityPreset.key Alertmanager Node label key to match Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param alertmanager.nodeAffinityPreset.values Alertmanager Node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param alertmanager.affinity Alertmanager Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: alertmanager.podAffinityPreset, alertmanager.podAntiAffinityPreset, and alertmanager.nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param alertmanager.nodeSelector Alertmanager Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param alertmanager.tolerations Alertmanager Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Alertmanager configuration
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
## @param alertmanager.config [object] Alertmanager configuration directive
## @skip alertmanager.config.route.group_by
## @skip alertmanager.config.route.routes
## @skip alertmanager.config.receivers
##
config:
global:
resolve_timeout: 5m
route:
group_by: ['job']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'null'
routes:
- match:
alertname: Watchdog
receiver: 'null'
receivers:
- name: 'null'
## @param alertmanager.templateFiles Extra files to be added inside the `alertmanager-{{ template "kube-prometheus.alertmanager.fullname" . }}` secret.
##
templateFiles: {}
## @param alertmanager.externalConfig Alertmanager configuration is created externally. If true, `alertmanager.config` is ignored, and a secret will not be created.
## Alertmanager requires a secret named `alertmanager-{{ template "kube-prometheus.alertmanager.fullname" . }}`
## It must contain:
## alertmanager.yaml: <config>
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md#alerting
##
externalConfig: false
## @param alertmanager.replicaCount Number of Alertmanager replicas desired
##
replicaCount: 1
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param alertmanager.livenessProbe.enabled Turn on and off liveness probe
## @param alertmanager.livenessProbe.path Path of the HTTP service for checking the healthy state
## @param alertmanager.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param alertmanager.livenessProbe.periodSeconds How often to perform the probe
## @param alertmanager.livenessProbe.timeoutSeconds When the probe times out
## @param alertmanager.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param alertmanager.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /-/healthy
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 120
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param alertmanager.readinessProbe.enabled Turn on and off readiness probe
## @param alertmanager.readinessProbe.path Path of the HTTP service for checking the ready state
## @param alertmanager.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param alertmanager.readinessProbe.periodSeconds How often to perform the probe
## @param alertmanager.readinessProbe.timeoutSeconds When the probe times out
## @param alertmanager.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param alertmanager.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /-/ready
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 120
successThreshold: 1
## @param alertmanager.logLevel Log level for Alertmanager
##
logLevel: info
## @param alertmanager.logFormat Log format for Alertmanager
##
logFormat: logfmt
## @param alertmanager.podMetadata [object] Standard object's metadata.
## ref: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
##
podMetadata:
labels: {}
annotations: {}
## @param alertmanager.secrets Secrets that should be mounted into the Alertmanager Pods
##
secrets: []
## @param alertmanager.configMaps ConfigMaps that should be mounted into the Alertmanager Pods
##
configMaps: []
## @param alertmanager.retention Metrics retention days
##
retention: 120h
## @param alertmanager.storageSpec Alertmanager StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
##
storageSpec: {}
## Alertmanager persistence parameters
##
persistence:
## @param alertmanager.persistence.enabled Use PVCs to persist data. If the storageSpec is provided this will not take effect.
## If you want to use this configuration make sure the storageSpec is not provided.
##
enabled: false
## @param alertmanager.persistence.storageClass Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner.
##
storageClass: ""
## @param alertmanager.persistence.accessModes Persistent Volume Access Modes
##
accessModes:
- ReadWriteOnce
## @param alertmanager.persistence.size Persistent Volume Size
##
size: 8Gi
## @param alertmanager.persistence.annotations Persistent Volume Claim annotations
##
annotations: {}
## @param alertmanager.paused If true, the Operator won't process any Alertmanager configuration changes
##
paused: false
## @param alertmanager.listenLocal ListenLocal makes the Alertmanager server listen on loopback
##
listenLocal: false
## @param alertmanager.containers Containers allows injecting additional containers
##
containers: []
## @param alertmanager.volumes Volumes allows configuration of additional volumes. Evaluated as a template
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
##
volumes: []
## @param alertmanager.volumeMounts VolumeMounts allows configuration of additional VolumeMounts. Evaluated as a template
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
##
volumeMounts: []
## @param alertmanager.priorityClassName Priority class assigned to the Pods
##
priorityClassName: ""
## @param alertmanager.additionalPeers AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster
##
additionalPeers: []
## @param alertmanager.routePrefix Prefix used to register routes, overriding externalUrl route
## Useful for proxies that rewrite URLs.
##
routePrefix: /
## @param alertmanager.portName Port name used for the pods and governing service. This defaults to web
##
portName: web
## @param alertmanager.configNamespaceSelector Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. This defaults to {}
##
configNamespaceSelector: {}
## @param alertmanager.configSelector AlertmanagerConfigs to be selected for to merge and configure Alertmanager with. This defaults to {}
##
configSelector: {}
## @param alertmanager.configuration EXPERIMENTAL: alertmanagerConfiguration specifies the global Alertmanager configuration. If defined, it takes precedence over the `configSecret` field. This field may change in future releases. The specified global alertmanager config will not force add a namespace label in routes and inhibitRules
##
configuration: {}
## @param alertmanager.configMatcherStrategy alertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects match the alerts.
## E.g.
## configMatcherStrategy:
## type: OnNamespace
## If type set to `OnNamespace`, the operator injects a label matcher matching the
## namespace of the AlertmanagerConfig object for all its routes and inhibition
## rules. `None` will not add any additional matchers other than the ones
## specified in the AlertmanagerConfig. Default is `OnNamespace`.
configMatcherStrategy: {}
## @section Exporters
##
## Exporters
##
exporters:
node-exporter:
## @param exporters.node-exporter.enabled Enable node-exporter
##
enabled: true
kube-state-metrics:
## @param exporters.kube-state-metrics.enabled Enable kube-state-metrics
##
enabled: true
## @param node-exporter [object] Node Exporter deployment configuration
##
node-exporter:
service:
labels:
jobLabel: node-exporter
serviceMonitor:
enabled: true
jobLabel: jobLabel
extraArgs:
collector.filesystem.mount-points-exclude: "^/(dev|proc|sys|var/lib/docker/.+)($|/)"
collector.filesystem.fs-types-exclude: "^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$"
## @param kube-state-metrics [object] Kube State Metrics deployment configuration
##
kube-state-metrics:
serviceMonitor:
enabled: true
honorLabels: true
## Component scraping for kubelet and kubelet hosted cAdvisor
##
kubelet:
## @param kubelet.enabled Create a ServiceMonitor to scrape kubelet service
##
enabled: true
## @param kubelet.namespace Namespace where kubelet service is deployed. Related configuration `operator.kubeletService.namespace`
##
namespace: kube-system
serviceMonitor:
## @param kubelet.serviceMonitor.https Enable scraping of the kubelet over HTTPS
##
https: true
## @param kubelet.serviceMonitor.interval Scrape interval (use by default, falling back to Prometheus' default)
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param kubelet.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
##
jobLabel: k8s-app
## @param kubelet.serviceMonitor.resource Enable scraping /metrics/resource from kubelet's service
##
resource: false
## @param kubelet.serviceMonitor.resourcePath From kubernetes 1.18, /metrics/resource/v1alpha1 was renamed to /metrics/resource
##
resourcePath: "/metrics/resource/v1alpha1"
## @param kubelet.serviceMonitor.resourceRelabelings Metric relabeling
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
##
resourceRelabelings: []
## @param kubelet.serviceMonitor.resourceMetricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
resourceMetricRelabelings: []
## @param kubelet.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param kubelet.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## @param kubelet.serviceMonitor.cAdvisor Enable scraping /metrics/cadvisor from kubelet's service
## ref: https://prometheus.io/docs/guides/cadvisor/#exploring-metrics-in-the-expression-browser
##
cAdvisor: true
## @param kubelet.serviceMonitor.cAdvisorMetricRelabelings Metric relabeling for scraping cAdvisor
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
cAdvisorMetricRelabelings: []
## @param kubelet.serviceMonitor.cAdvisorRelabelings Relabel configs for scraping cAdvisor
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
cAdvisorRelabelings: []
## @param kubelet.serviceMonitor.labels Extra labels for the ServiceMonitor
##
labels: {}
## @param kubelet.serviceMonitor.annotations Extra annotations for the ServiceMonitor
##
annotations: {}
## @param kubelet.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: ""
## @section Blackbox Exporter Deployment Parameters
##
blackboxExporter:
## @param blackboxExporter.enabled Enable Blackbox Exporter deployment
##
enabled: true
## Bitnami Blackbox Exporter image version
## ref: https://hub.docker.com/r/bitnami/blackbox-exporter/tags/
## @param blackboxExporter.image.registry [default: REGISTRY_NAME] Blackbox Exporter image registry
## @param blackboxExporter.image.repository [default: REPOSITORY_NAME/blackbox-exporter] Blackbox Exporter image repository
## @param blackboxExporter.image.pullPolicy Blackbox Exporter image pull policy
## @skip blackboxExporter.image.tag Blackbox Exporter image tag (immutable tags are recommended)
## @param blackboxExporter.image.digest Blackbox Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param blackboxExporter.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/blackbox-exporter
tag: 0.26.0-debian-12-r12
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param blackboxExporter.extraEnvVars Array with extra environment variables to add to blackboxExporter nodes
## e.g:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param blackboxExporter.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for blackboxExporter nodes
##
extraEnvVarsCM: ""
## @param blackboxExporter.extraEnvVarsSecret Name of existing Secret containing extra env vars for blackboxExporter nodes
##
extraEnvVarsSecret: ""
## @param blackboxExporter.command Override default container command (useful when using custom images)
##
command: []
## @param blackboxExporter.args Override default container args (useful when using custom images)
##
args: []
## @param blackboxExporter.replicaCount Number of Blackbox Exporter replicas to deploy
##
replicaCount: 1
## Configure extra options for Blackbox Exporter container liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param blackboxExporter.livenessProbe.enabled Enable livenessProbe on Blackbox Exporter nodes
## @param blackboxExporter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param blackboxExporter.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param blackboxExporter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param blackboxExporter.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param blackboxExporter.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
## @param blackboxExporter.readinessProbe.enabled Enable readinessProbe on Blackbox Exporter nodes
## @param blackboxExporter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param blackboxExporter.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param blackboxExporter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param blackboxExporter.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param blackboxExporter.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
## @param blackboxExporter.startupProbe.enabled Enable startupProbe on Blackbox Exporter containers
## @param blackboxExporter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param blackboxExporter.startupProbe.periodSeconds Period seconds for startupProbe
## @param blackboxExporter.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param blackboxExporter.startupProbe.failureThreshold Failure threshold for startupProbe
## @param blackboxExporter.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param blackboxExporter.customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param blackboxExporter.customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param blackboxExporter.customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## @param blackboxExporter.configuration [object] Blackbox Exporter configuration
##
configuration: |
"modules":
"http_2xx":
"http":
"preferred_ip_protocol": "ip4"
"prober": "http"
"http_post_2xx":
"http":
"method": "POST"
"preferred_ip_protocol": "ip4"
"prober": "http"
"irc_banner":
"prober": "tcp"
"tcp":
"preferred_ip_protocol": "ip4"
"query_response":
- "send": "NICK prober"
- "send": "USER prober prober prober :prober"
- "expect": "PING :([^ ]+)"
"send": "PONG ${1}"
- "expect": "^:[^ ]+ 001"
"pop3s_banner":
"prober": "tcp"
"tcp":
"preferred_ip_protocol": "ip4"
"query_response":
- "expect": "^+OK"
"tls": true
"tls_config":
"insecure_skip_verify": false
"ssh_banner":
"prober": "tcp"
"tcp":
"preferred_ip_protocol": "ip4"
"query_response":
- "expect": "^SSH-2.0-"
"tcp_connect":
"prober": "tcp"
"tcp":
"preferred_ip_protocol": "ip4"
## @param blackboxExporter.existingConfigMap ConfigMap pointing to the Blackbox Exporter configuration
##
existingConfigMap: ""
## @param blackboxExporter.containerPorts.http Blackbox Exporter HTTP container port
##
containerPorts:
http: 19115
serviceAccount:
## @param blackboxExporter.serviceAccount.create Enable creation of ServiceAccount for Prometheus Operator pod
##
create: true
## @param blackboxExporter.serviceAccount.name The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the common.names.fullname template
##
name: ""
## @param blackboxExporter.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
## Can be set to false if pods using this serviceAccount do not need to use K8s API
##
automountServiceAccountToken: false
## @param blackboxExporter.serviceAccount.annotations Additional custom annotations for the ServiceAccount
##
annotations: {}
## Blackbox Exporter resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param blackboxExporter.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if blackboxExporter.resources is set (blackboxExporter.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param blackboxExporter.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param blackboxExporter.podSecurityContext.enabled Enabled Blackbox Exporter pods' Security Context
## @param blackboxExporter.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param blackboxExporter.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param blackboxExporter.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param blackboxExporter.podSecurityContext.fsGroup Set Blackbox Exporter pod's Security Context fsGroup
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param blackboxExporter.containerSecurityContext.enabled Enabled containers' Security Context
## @param blackboxExporter.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param blackboxExporter.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param blackboxExporter.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param blackboxExporter.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param blackboxExporter.containerSecurityContext.privileged Set container's Security Context privileged
## @param blackboxExporter.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param blackboxExporter.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param blackboxExporter.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param blackboxExporter.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param blackboxExporter.lifecycleHooks for the blackboxExporter container(s) to automate configuration before or after startup
##
lifecycleHooks: {}
## @param blackboxExporter.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param blackboxExporter.hostAliases blackboxExporter pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param blackboxExporter.podLabels Extra labels for blackboxExporter pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param blackboxExporter.podAnnotations Annotations for blackboxExporter pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param blackboxExporter.podAffinityPreset Pod affinity preset. Ignored if `blackboxExporter.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param blackboxExporter.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `blackboxExporter.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param blackboxExporter.nodeAffinityPreset.type Node affinity preset type. Ignored if `blackboxExporter.affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param blackboxExporter.nodeAffinityPreset.key Node label key to match. Ignored if `blackboxExporter.affinity` is set
##
key: ""
## @param blackboxExporter.nodeAffinityPreset.values Node label values to match. Ignored if `blackboxExporter.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param blackboxExporter.affinity Affinity for Blackbox Exporter pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `blackboxExporter.podAffinityPreset`, `blackboxExporter.podAntiAffinityPreset`, and `blackboxExporter.nodeAffinityPreset` will be ignored when it's set
##
affinity: {}
## @param blackboxExporter.nodeSelector Node labels for Blackbox Exporter pods assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param blackboxExporter.tolerations Tolerations for Blackbox Exporter pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param blackboxExporter.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
##
topologySpreadConstraints: []
## @param blackboxExporter.priorityClassName Blackbox Exporter pods' priorityClassName
##
priorityClassName: ""
## @param blackboxExporter.schedulerName Kubernetes pod scheduler registry
## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param blackboxExporter.terminationGracePeriodSeconds In seconds, the time given to the Blackbox Exporter pod to terminate gracefully
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
terminationGracePeriodSeconds: ""
## @param blackboxExporter.updateStrategy.type Blackbox Exporter deployment strategy type
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
##
updateStrategy:
type: RollingUpdate
## @param blackboxExporter.extraVolumes Optionally specify extra list of additional volumes for the Blackbox Exporter pod(s)
##
extraVolumes: []
## @param blackboxExporter.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Blackbox Exporter container(s)
##
extraVolumeMounts: []
## @param blackboxExporter.sidecars Add additional sidecar containers to the Blackbox Exporter pod(s)
## e.g:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param blackboxExporter.initContainers Add additional init containers to the Blackbox Exporter pod(s)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
## e.g:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## command: ['sh', '-c', 'echo "hello world"']
##
initContainers: []
## @section Blackbox Exporter Traffic Exposure Parameters
##
## blackboxExporter service parameters
##
service:
## @param blackboxExporter.service.type Blackbox Exporter service type
##
type: ClusterIP
## @param blackboxExporter.service.ports.http Blackbox Exporter HTTP service port
##
ports:
http: 19115
## Node ports to expose
## NOTE: choose port between <30000-32767>
## @param blackboxExporter.service.nodePorts.http Node port for HTTP
##
nodePorts:
http: ""
## @param blackboxExporter.service.sessionAffinity Control where client requests go, to the same pod or round-robin
## Values: ClientIP or None
## ref: https://kubernetes.io/docs/concepts/services-networking/service/
##
sessionAffinity: None
## @param blackboxExporter.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## @param blackboxExporter.service.clusterIP Blackbox Exporter service Cluster IP
## e.g.:
## clusterIP: None
##
clusterIP: ""
## @param blackboxExporter.service.loadBalancerIP Blackbox Exporter service Load Balancer IP
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerIP: ""
## @param blackboxExporter.service.loadBalancerClass Blackbox Exporter service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerClass: ""
## @param blackboxExporter.service.loadBalancerSourceRanges Blackbox Exporter service Load Balancer sources
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param blackboxExporter.service.externalTrafficPolicy Blackbox Exporter service external traffic policy
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param blackboxExporter.service.annotations Additional custom annotations for Blackbox Exporter service
##
annotations: {}
## @param blackboxExporter.service.extraPorts Extra ports to expose in the Blackbox Exporter service
##
extraPorts: []
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param blackboxExporter.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param blackboxExporter.networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports server is listening
## on. When true, server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param blackboxExporter.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
##
allowExternalEgress: true
## @param blackboxExporter.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param blackboxExporter.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param blackboxExporter.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param blackboxExporter.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param blackboxExporter.pdb.create Enable/disable a Pod Disruption Budget creation
## @param blackboxExporter.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param blackboxExporter.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## Component scraping the kube-apiserver
## ref: https://kubernetes.io/docs/concepts/overview/components/#kube-apiserver
##
kubeApiServer:
  ## @param kubeApiServer.enabled Create a ServiceMonitor to scrape kube-apiserver service
  ##
  enabled: true
  serviceMonitor:
    ## @param kubeApiServer.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    interval: ""
    ## @param kubeApiServer.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: component
    ## @param kubeApiServer.serviceMonitor.metricRelabelings Metric relabeling
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
    ##
    metricRelabelings: []
    ## @param kubeApiServer.serviceMonitor.relabelings Relabel configs
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
    ##
    relabelings: []
    ## @param kubeApiServer.serviceMonitor.labels Extra labels for the ServiceMonitor
    ##
    labels: {}
    ## @param kubeApiServer.serviceMonitor.annotations Extra annotations for the ServiceMonitor
    ##
    annotations: {}
    ## @param kubeApiServer.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: ""
## Component scraping the kube-controller-manager
##
kubeControllerManager:
  ## @param kubeControllerManager.enabled Create a ServiceMonitor to scrape kube-controller-manager service
  ##
  enabled: true
  ## @param kubeControllerManager.endpoints If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  ## endpoints:
  ## - 10.141.4.22
  ## - 10.141.4.23
  ## - 10.141.4.24
  ##
  endpoints: []
  ## @param kubeControllerManager.namespace Namespace where kube-controller-manager service is deployed.
  ##
  namespace: kube-system
  ## Service ports and selector information
  ## NOTE(review): Kubernetes >= 1.22 removed the kube-controller-manager insecure port; metrics are then only served on the secure port 10257 over https — confirm against your cluster version before relying on these defaults (adjust ports/targetPorts and serviceMonitor.https accordingly)
  ## @param kubeControllerManager.service.enabled Whether or not to create a Service object for kube-controller-manager
  ## @param kubeControllerManager.service.ports.http Listening port of the kube-controller-manager Service object
  ## @param kubeControllerManager.service.targetPorts.http Port to target on the kube-controller-manager Pods. This should be the port that kube-controller-manager is exposing metrics on
  ## @param kubeControllerManager.service.selector Optional PODs Label selector for the service
  ##
  service:
    enabled: true
    ports:
      http: 10252
    targetPorts:
      http: 10252
    ## selector:
    ##   component: kube-controller-manager
    ##
    selector: {}
    ## @param kubeControllerManager.service.labels Additional labels for kube-controller-manager service
    ##
    labels: {}
  serviceMonitor:
    ## @param kubeControllerManager.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    interval: ""
    ## @param kubeControllerManager.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: component
    ## @param kubeControllerManager.serviceMonitor.https Enable scraping kube-controller-manager over https
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false
    ## @param kubeControllerManager.serviceMonitor.insecureSkipVerify Skip TLS certificate validation when scraping
    ##
    insecureSkipVerify: ""
    ## @param kubeControllerManager.serviceMonitor.serverName Name of the server to use when validating TLS certificate
    ##
    serverName: ""
    ## @param kubeControllerManager.serviceMonitor.metricRelabelings Metric relabeling
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
    ##
    metricRelabelings: []
    ## @param kubeControllerManager.serviceMonitor.relabelings Relabel configs
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
    ##
    relabelings: []
    ## @param kubeControllerManager.serviceMonitor.labels Extra labels for the ServiceMonitor
    ##
    labels: {}
    ## @param kubeControllerManager.serviceMonitor.annotations Extra annotations for the ServiceMonitor
    ##
    annotations: {}
    ## @param kubeControllerManager.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: ""
## Component scraping kube scheduler
##
kubeScheduler:
  ## @param kubeScheduler.enabled Create a ServiceMonitor to scrape kube-scheduler service
  ##
  enabled: true
  ## @param kubeScheduler.endpoints If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  ## endpoints:
  ## - 10.141.4.22
  ## - 10.141.4.23
  ## - 10.141.4.24
  ##
  endpoints: []
  ## @param kubeScheduler.namespace Namespace where kube-scheduler service is deployed.
  ##
  namespace: kube-system
  ## If using kubeScheduler.endpoints only the port and targetPort are used
  ## NOTE(review): Kubernetes >= 1.23 removed the kube-scheduler insecure port; metrics are then only served on the secure port 10259 over https — confirm against your cluster version before relying on these defaults (adjust ports/targetPorts and serviceMonitor.https accordingly)
  ## @param kubeScheduler.service.enabled Whether or not to create a Service object for kube-scheduler
  ## @param kubeScheduler.service.ports.http Listening port of the kube scheduler Service object
  ## @param kubeScheduler.service.targetPorts.http Port to target on the kube scheduler Pods. This should be the port that kube scheduler is exposing metrics on
  ## @param kubeScheduler.service.selector Optional PODs Label selector for the service
  ##
  service:
    enabled: true
    ports:
      http: 10251
    targetPorts:
      http: 10251
    ## selector:
    ##   component: kube-scheduler
    ##
    selector: {}
    ## @param kubeScheduler.service.labels Additional labels for kube-scheduler service
    ##
    labels: {}
  serviceMonitor:
    ## @param kubeScheduler.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## @param kubeScheduler.serviceMonitor.https Enable scraping kube-scheduler over https
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false
    ## @param kubeScheduler.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: component
    ## @param kubeScheduler.serviceMonitor.insecureSkipVerify Skip TLS certificate validation when scraping
    ##
    insecureSkipVerify: ""
    ## @param kubeScheduler.serviceMonitor.serverName Name of the server to use when validating TLS certificate
    ##
    serverName: ""
    ## @param kubeScheduler.serviceMonitor.metricRelabelings Metric relabeling
    ## metricRelabelings:
    ##   - action: keep
    ##     regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    ##     sourceLabels: [__name__]
    ##
    metricRelabelings: []
    ## @param kubeScheduler.serviceMonitor.relabelings Relabel configs
    ## relabelings:
    ##   - sourceLabels: [__meta_kubernetes_pod_node_name]
    ##     separator: ;
    ##     regex: ^(.*)$
    ##     targetLabel: nodename
    ##     replacement: $1
    ##     action: replace
    ##
    relabelings: []
    ## @param kubeScheduler.serviceMonitor.labels Extra labels for the ServiceMonitor
    ##
    labels: {}
    ## @param kubeScheduler.serviceMonitor.annotations Extra annotations for the ServiceMonitor
    ##
    annotations: {}
    ## @param kubeScheduler.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: ""
## Component scraping coreDns
##
coreDns:
  ## @param coreDns.enabled Create a ServiceMonitor to scrape coredns service
  ##
  enabled: true
  ## @param coreDns.namespace Namespace where core dns service is deployed.
  ##
  namespace: kube-system
  ## Create a ServiceMonitor to scrape coredns service
  ## @param coreDns.service.enabled Whether or not to create a Service object for coredns
  ## @param coreDns.service.ports.http Listening port of the coredns Service object
  ## @param coreDns.service.targetPorts.http Port to target on the coredns Pods. This should be the port that coredns is exposing metrics on
  ## @param coreDns.service.selector Optional PODs Label selector for the service
  ##
  service:
    enabled: true
    ports:
      http: 9153
    targetPorts:
      http: 9153
    ## selector:
    ##   component: kube-dns
    ##
    selector: {}
    ## @param coreDns.service.labels Additional labels for coredns service
    ##
    labels: {}
  serviceMonitor:
    ## @param coreDns.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## @param coreDns.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: k8s-app
    ## @param coreDns.serviceMonitor.metricRelabelings Metric relabel configs to apply to samples before ingestion.
    ## metricRelabelings:
    ##   - action: keep
    ##     regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    ##     sourceLabels: [__name__]
    ##
    metricRelabelings: []
    ## @param coreDns.serviceMonitor.relabelings Relabel configs to apply to samples before ingestion.
    ## relabelings:
    ##   - sourceLabels: [__meta_kubernetes_pod_node_name]
    ##     separator: ;
    ##     regex: ^(.*)$
    ##     targetLabel: nodename
    ##     replacement: $1
    ##     action: replace
    ##
    relabelings: []
    ## @param coreDns.serviceMonitor.labels Extra labels for the ServiceMonitor
    ##
    labels: {}
    ## @param coreDns.serviceMonitor.annotations Extra annotations for the ServiceMonitor
    ##
    annotations: {}
    ## @param coreDns.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: ""
## Component scraping the kube-proxy
## NOTE(review): kube-proxy presumably serves metrics on 127.0.0.1:10249 only by default (KubeProxyConfiguration metricsBindAddress); it must be set to 0.0.0.0 for Prometheus to reach it — confirm against your cluster setup
##
kubeProxy:
  ## @param kubeProxy.enabled Create a ServiceMonitor to scrape the kube-proxy Service
  ##
  enabled: true
  ## @param kubeProxy.endpoints If your kube-proxy is not deployed as a pod, specify IPs it can be found on
  ## endpoints:
  ## - 10.141.4.22
  ## - 10.141.4.23
  ## - 10.141.4.24
  ##
  endpoints: []
  ## @param kubeProxy.namespace Namespace where kube-proxy service is deployed.
  ##
  namespace: kube-system
  ## @param kubeProxy.service.enabled Whether or not to create a Service object for kube-proxy
  ## @param kubeProxy.service.ports.http Listening port of the kube-proxy Service object
  ## @param kubeProxy.service.targetPorts.http Port to target on the kube-proxy Pods. This should be the port that kube-proxy is exposing metrics on
  ## @param kubeProxy.service.selector Optional PODs Label selector for the service
  ##
  service:
    enabled: true
    ports:
      http: 10249
    targetPorts:
      http: 10249
    ## selector:
    ##   k8s-app: kube-proxy
    ##
    selector: {}
    ## @param kubeProxy.service.labels Additional labels for kube-proxy service
    ##
    labels: {}
  serviceMonitor:
    ## @param kubeProxy.serviceMonitor.https Enable scraping kube-proxy over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false
    ## @param kubeProxy.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    interval: ""
    ## @param kubeProxy.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: k8s-app
    ## @param kubeProxy.serviceMonitor.metricRelabelings Metric relabeling
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
    ##
    metricRelabelings: []
    ## @param kubeProxy.serviceMonitor.relabelings Relabel configs
    ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
    ##
    relabelings: []
    ## @param kubeProxy.serviceMonitor.labels Extra labels for the ServiceMonitor
    ##
    labels: {}
    ## @param kubeProxy.serviceMonitor.annotations Extra annotations for the ServiceMonitor
    ##
    annotations: {}
    ## @param kubeProxy.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: ""
## @section RBAC parameters
##
## Role Based Access
## ref: https://kubernetes.io/docs/admin/authorization/rbac/
## @param rbac.create Whether to create and use RBAC resources or not
## @param rbac.pspEnabled Whether to create a PodSecurityPolicy and bind it with RBAC. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
## NOTE(review): pspEnabled defaults to true but the policy/v1beta1 API no longer exists on Kubernetes >= 1.25 — presumably the chart templates gate PSP creation on API availability; verify before relying on this default
##
rbac:
  create: true
  pspEnabled: true
## @section Thanos Ruler Parameters
##
thanosRuler:
## @param thanosRuler.enabled Enable/disable Thanos Ruler component
##
enabled: false
## Bitnami Thanos image
## ref: https://hub.docker.com/r/bitnami/thanos/tags/
## @param thanosRuler.image.registry [default: REGISTRY_NAME] Thanos image registry
## @param thanosRuler.image.repository Thanos image repository
## @skip thanosRuler.image.tag Thanos image tag
## @param thanosRuler.image.digest Thanos image digest
## @param thanosRuler.image.pullPolicy Thanos image pull policy
## @param thanosRuler.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/thanos
tag: 0.39.0-debian-12-r0
digest: ""
pullPolicy: IfNotPresent
pullSecrets: []
## @param thanosRuler.replicaCount Number of Thanos Ruler replicas to deploy
##
replicaCount: 1
## @param thanosRuler.paused When a ThanosRuler deployment is paused, no actions except for deletion will be performed on the underlying objects
##
paused: false
## @param thanosRuler.logFormat Log format for Thanos Ruler
##
logFormat: logfmt
## @param thanosRuler.logLevel Log level for Thanos ruler
##
logLevel: info
## @param thanosRuler.retention Time duration ThanosRuler shall retain data for
## Must match the regular expression [0-9]+(ms|s|m|h|d|w|y) (milliseconds seconds minutes hours days weeks years)
## The field has no effect when remote-write is configured since the Ruler operates in stateless mode
##
retention: "24h"
## @param thanosRuler.evaluationInterval Interval between consecutive evaluations
##
evaluationInterval: ""
## @param thanosRuler.labels Configures the external label pairs of the ThanosRuler resource
## A default replica label 'thanos_ruler_replica' will always be added as a label with the value of the pods name
##
labels: {}
## @param thanosRuler.storage Storage spec to specify how storage shall be used.
## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.StorageSpec
storage: {}
## @param thanosRuler.volumes Additional volumes on the output StatefulSet definition
## Volumes specified will be appended to other volumes that are generated as a result of thanosRuler.storage configuration
##
volumes: []
## @param thanosRuler.volumeMounts Additional VolumeMounts on the output StatefulSet definition.
## VolumeMounts specified will be appended to other VolumeMounts that are generated as a result of thanosRuler.storage configuration
##
volumeMounts: []
## @param thanosRuler.listenLocal Makes Thanos Ruler listen on loopback, so that it does not bind against the Pod IP
##
listenLocal: false
## @param thanosRuler.externalPrefix The external URL the Thanos Ruler instances will be available under. Maps to --web.external-prefix on Thanos Ruler
##
externalPrefix: ""
## Service parameters
##
service:
## @param thanosRuler.service.type Kubernetes service type
##
type: ClusterIP
## @param thanosRuler.service.ports.http Thanos Ruler service HTTP port
## @param thanosRuler.service.ports.grpc Thanos Ruler service GRPC port
##
ports:
http: 9090
grpc: 10901
## @param thanosRuler.service.nodePorts.http Specify the Thanos Ruler HTTP nodePort value for the LoadBalancer and NodePort service types
## @param thanosRuler.service.nodePorts.grpc Specify the Thanos Ruler GRPC nodePort value for the LoadBalancer and NodePort service types
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePorts:
http: ""
grpc: ""
## @param thanosRuler.service.clusterIP Thanos Ruler service clusterIP IP
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param thanosRuler.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
## Set the LoadBalancer service type to internal only
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param thanosRuler.service.loadBalancerSourceRanges Address that are allowed when service is LoadBalancer
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: [ ]
## @param thanosRuler.service.externalTrafficPolicy Thanos Ruler service externalTrafficPolicy
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
##
externalTrafficPolicy: Cluster
## @param thanosRuler.service.labels Extra labels for Thanos Ruler service
##
labels: { }
## @param thanosRuler.service.annotations Annotations for Thanos Ruler service
##
annotations: { }
## @param thanosRuler.service.extraPorts Extra ports to expose in the Thanos Ruler service
##
extraPorts: [ ]
## @param thanosRuler.service.labelSelectorsOverride Selector for Thanos Query service
##
labelSelectorsOverride: { }
## @param thanosRuler.service.additionalHeadless Additional Headless service
##
additionalHeadless: false
## Headless service properties
##
headless:
## @param thanosRuler.service.headless.annotations Annotations for the headless service.
##
annotations: { }
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param thanosRuler.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param thanosRuler.networkPolicy.allowExternal Don't require client label for connections
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the ports the application is listening
## on. When true, the app will accept connections from any source (with the correct destination port).
##
allowExternal: true
## @param thanosRuler.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
## If set to 'false', set 'extraEgress' to allow communicating to your Thanos Query/Frontend Query services.
##
allowExternalEgress: true
## @param thanosRuler.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: [ ]
## @param thanosRuler.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param thanosRuler.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param thanosRuler.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## @param thanosRuler.routePrefix Prefix used to register routes. Useful for proxies that rewrite URLs.
##
routePrefix: /
## Configure the ingress resource that allows you to access Thanos Ruler
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
## @param thanosRuler.ingress.enabled Enable ingress controller resource
##
enabled: false
## @param thanosRuler.ingress.hostname Default host for the ingress resource
##
hostname: thanos-ruler.local
## @param thanosRuler.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
##
ingressClassName: ""
## @param thanosRuler.ingress.labels Additional label for the Ingress resource.
## Use this parameter to set the required labels for your needs
## e.g.:
## labels:
## dns-managed-by-external-dns: 'true'
##
labels: {}
## @param thanosRuler.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g.:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param thanosRuler.ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: thanos.local
## path: /
## pathType: ImplementationSpecific
##
extraHosts: []
## @param thanosRuler.ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - thanos.local
## secretName: thanos.local-tls
##
extraTls: []
## @param thanosRuler.ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## e.g:
## - name: thanos.local-tls
## key:
## certificate:
##
secrets: []
## @param thanosRuler.ingress.extraRules Additional rules to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
## e.g.:
## extraRules:
## - host: example.local
## http:
## path: /
## backend:
## service:
## name: example-svc
## port:
## name: http
##
extraRules: []
## @param thanosRuler.ingress.apiVersion Force Ingress API version (automatically detected if not set)
##
apiVersion: ""
## @param thanosRuler.ingress.path Ingress path
##
path: /
## @param thanosRuler.ingress.pathType Ingress path type
##
pathType: ImplementationSpecific
## @param thanosRuler.ingress.tls Enable TLS configuration for the hostname defined at `thanosRuler.ingress.hostname` parameter
## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.thanosRuler.ingress.hostname }}`
## You can:
## - Use the `thanosRuler.ingress.secrets` parameter to create this TLS secret
## - Rely on cert-manager to create it by setting the corresponding annotations
## - Rely on Helm to create self-signed certificates by setting `thanosRuler.ingress.selfSigned=true`
##
tls: false
## @param thanosRuler.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
##
selfSigned: false
## Service account for Thanos Ruler to use
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
## @param thanosRuler.serviceAccount.create Specify whether to create a ServiceAccount for Thanos Ruler
##
create: true
## @param thanosRuler.serviceAccount.name The name of the ServiceAccount to create
## If not set and create is true, a name is generated using the kube-prometheus.thanosRuler.fullname template
##
name: ""
## @param thanosRuler.serviceAccount.annotations Additional annotations for the ServiceAccount
## annotations:
## eks.amazonaws.com/role-arn: arn:aws:iam::ACCOUNT:role/thanosruler
##
annotations: {}
## @param thanosRuler.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
##
automountServiceAccountToken: false
## Thanos Ruler pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param thanosRuler.podSecurityContext.enabled Enable security context
## @param thanosRuler.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param thanosRuler.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param thanosRuler.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param thanosRuler.podSecurityContext.fsGroup Group ID for the container filesystem
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## @param thanosRuler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge).
## This is ignored if operator.resources is set (operator.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param thanosRuler.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
##
resources: {}
## @param thanosRuler.containerPorts.http HTTP container port
## @param thanosRuler.containerPorts.grpc GRPC container port
##
containerPorts:
http: 10902
grpc: 10901
## @param thanosRuler.alertQueryUrl The external Query URL the Thanos Ruler will set in the Source field of all alerts
## Maps to the alert.query-url CLI arg
##
alertQueryUrl: ""
## Configuration for connecting to alertmanager
## Maps to --alertmanagers.config Thanos Ruler argument
## NOTE: This field takes precedence over alertmanagersUrl.
##
alertmanagersConfig:
## @param thanosRuler.alertmanagersConfig.existingSecret.name Name of an existing secret to use for Alert Manager config
## If configured, thanosRuler.alertmanagersConfig.config will not be used
## @param thanosRuler.alertmanagersConfig.existingSecret.key Name of a key in the existing secret to use for Alert Manager config
##
existingSecret:
name: ""
key: ""
## @param thanosRuler.alertmanagersConfig.config Alert Manager configuration
## Unused if thanosRuler.alertmanagersConfig.existingSecret.name is configured
## If empty, Thanos Ruler will use this chart's Alertmanager when 'alertmanager.enabled' is 'true'
## ref: https://thanos.io/tip/components/rule.md/#alertmanager
## e.g:
## alertmanagers:
## - http_config:
## basic_auth:
## username: some_user
## password: some_pass
## static_configs:
## - alertmanager.thanos.io
## scheme: http
## timeout: 10s
## api_version: v2
##
config: {}
## @param thanosRuler.alertDropLabels Configures the label names which should be dropped in Thanos Ruler alerts
## The replica label `thanos_ruler_replica` will always be dropped from the alerts.
##
alertDropLabels: []
## Configures the list of Thanos Query endpoints from which to query metrics.
## Maps to --query.config Thanos Ruler argument
## The configuration format is defined at https://thanos.io/tip/components/rule.md/#query-api
queryConfig:
## @param thanosRuler.queryConfig.existingSecret.name Name of an existing secret to use for Query config
## If configured, thanosRuler.queryConfig.config will not be used
## @param thanosRuler.queryConfig.existingSecret.key Key in the existing secret to use for Query config
##
existingSecret:
name: ""
key: "query-config.yaml"
## @param thanosRuler.queryConfig.config Query API endpoints configuration for Thanos Ruler
## Unused if thanosRuler.queryConfig.existingSecret.name is configured
##
## e.g:
## config:
## - static_configs:
## - "dnssrv+_http._tcp.thanos-query.thanos.svc.cluster.local"
config: []
## Configures object storage
## Maps to --objstore.config Thanos Ruler argument
## ref: https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage
##
objectStorageConfig:
## @param thanosRuler.objectStorageConfig.existingSecret.name Name of an existing secret to use for Object Storage config
## If configured, thanosRuler.objectStorageConfig.config will not be used
## @param thanosRuler.objectStorageConfig.existingSecret.key Key in the existing secret to use for Object Storage config
##
existingSecret:
name: ""
key: ""
## @param thanosRuler.objectStorageConfig.config Object Storage configuration for Thanos Ruler
## Unused if thanosRuler.objectStorageConfig.existingSecret.name is configured
##
config: {}
## @param thanosRuler.ruleNamespaceSelector Namespaces to be selected for PrometheusRules discovery
## If nil, select own namespace
##
ruleNamespaceSelector: {}
## @param thanosRuler.ruleSelector PrometheusRule selector labels
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md
## If nil, select all PrometheusRules
##
ruleSelector: {}
## @param thanosRuler.evalInterval How frequently to evaluate rules
##
evalInterval: "1m"
## @param thanosRuler.clusterName Used to set the 'ruler_cluster' label
##
clusterName: ""
## @param thanosRuler.additionalArgs [array] Additional arguments for the ThanosRuler container.
## It is intended for e.g. activating hidden flags which are not supported by the dedicated configuration options yet.
##
additionalArgs:
- name: grpc-address
value: "0.0.0.0:{{ .Values.thanosRuler.containerPorts.grpc }}"
- name: http-address
value: "0.0.0.0:{{ .Values.thanosRuler.containerPorts.http }}"
## Thanos Ruler Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param thanosRuler.pdb.create Enable/disable a Pod Disruption Budget creation for Thanos Ruler
## @param thanosRuler.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param thanosRuler.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## @param thanosRuler.nodeSelector Node labels for Thanos Ruler pods assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## Create a servicemonitor for Thanos ruler
##
serviceMonitor:
## @param thanosRuler.serviceMonitor.enabled Creates a ServiceMonitor to monitor Thanos Ruler
##
enabled: true
## @param thanosRuler.serviceMonitor.https Enable scraping Thanos Ruler over https.
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
##
https: false
## @param thanosRuler.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
##
jobLabel: ""
## @param thanosRuler.serviceMonitor.interval Scrape interval (use by default, falling back to Prometheus' default)
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param thanosRuler.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param thanosRuler.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## @param thanosRuler.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
scrapeTimeout: ""
## @param thanosRuler.serviceMonitor.labels Extra labels for the ServiceMonitor
##
labels: {}
## @param thanosRuler.serviceMonitor.annotations Extra annotations for the ServiceMonitor
##
annotations: {}
## @param thanosRuler.serviceMonitor.extraParameters Any extra parameter to be added to the endpoint configured in the ServiceMonitor
## (e.g. tlsConfig for further customization of the HTTPS behavior)
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Endpoint
##
extraParameters: {}
## @param thanosRuler.serviceMonitor.sampleLimit Per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: ""
## @param thanosRuler.podAffinityPreset Thanos Ruler Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param thanosRuler.podAntiAffinityPreset Thanos Ruler Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param thanosRuler.nodeAffinityPreset.type Thanos Ruler Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param thanosRuler.nodeAffinityPreset.key Thanos Ruler Node label key to match. Ignored if `affinity` is set
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param thanosRuler.nodeAffinityPreset.values Thanos Ruler Node label values to match. Ignored if `affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param thanosRuler.affinity Thanos Ruler Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
## Note: thanosRuler.podAffinityPreset, thanosRuler.podAntiAffinityPreset, and thanosRuler.nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param thanosRuler.podMetadata [object] Standard object's metadata
## ref: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
##
podMetadata:
labels: {}
annotations: {}
## @param thanosRuler.tolerations Thanos Ruler Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param thanosRuler.topologySpreadConstraints Topology Spread Constraints for pod assignment
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## The value is evaluated as a template
##
topologySpreadConstraints: []
## @param thanosRuler.containers Containers allows injecting additional containers or modifying operator generated containers
## The current container names are: thanos-ruler and config-reloader
##
containers: []
## @param thanosRuler.initContainers InitContainers allows adding initContainers to the pod definition
## Those can be used to e.g. fetch secrets for injection into the ThanosRuler configuration from external sources.
##
initContainers: []
## @param thanosRuler.priorityClassName Priority class assigned to the Pods
##
priorityClassName: ""
## @param thanosRuler.portName Port name used for the pods and governing service
##
portName: web
## @param thanosRuler.web Defines the configuration of the ThanosRuler web server
##
web: {}
## @param thanosRuler.remoteWrite Defines the list of remote write configurations
## When the list isn't empty, the ruler is configured with stateless mode
## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.RemoteWriteSpec
##
remoteWrite: []