## @section Global parameters
## Global Docker image parameters
## Please note that these values override the image parameters, including those of dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
## @section Common parameters
## @param commonLabels Labels to add to all deployed objects
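## E.g. (illustrative labels only):
## commonLabels:
##   app.kubernetes.io/part-of: dataplatform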
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
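## E.g. (illustrative annotation only):
## commonAnnotations:
##   owner: data-team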
##
commonAnnotations: {}
## @param extraDeploy Array of extra objects to deploy with the release
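## E.g. (illustrative; a list of extra Kubernetes manifests to deploy):
## extraDeploy:
##   - apiVersion: v1
##     kind: ConfigMap
##     metadata:
##       name: extra-configmap
##     data:
##       foo: bar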
##
extraDeploy: []
## @section Data Platform Chart parameters
## Configuration for the Data Platform Prometheus exporter and metrics emitter
##
dataplatform:
serviceAccount:
## @param dataplatform.serviceAccount.create Specifies whether a ServiceAccount should be created
##
create: true
## @param dataplatform.serviceAccount.name The name of the ServiceAccount to create
## If not set and create is true, a name is generated using the fullname template
##
name: ""
## @param dataplatform.serviceAccount.automountServiceAccountToken Allows auto-mounting of the ServiceAccount token on the created ServiceAccount
## Can be set to false if pods using this ServiceAccount do not need to use the K8s API
##
automountServiceAccountToken: true
## Role Based Access
## ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
## @param dataplatform.rbac.create Whether to create & use RBAC resources or not
## binding the dataplatform ServiceAccount to a role
## that allows dataplatform pods to query the K8s API
##
create: true
exporter:
## @param dataplatform.exporter.enabled Start a prometheus exporter
##
enabled: true
## Data Platform BP2 exporter image
## ref: https://hub.docker.com/r/bitnami/dataplatform-exporter/tags/
## @param dataplatform.exporter.image.registry dataplatform exporter image registry
## @param dataplatform.exporter.image.repository dataplatform exporter image repository
## @param dataplatform.exporter.image.tag dataplatform exporter image tag (immutable tags are recommended)
## @param dataplatform.exporter.image.pullPolicy dataplatform exporter image pull policy
## @param dataplatform.exporter.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/dataplatform-exporter
tag: 1.0.1-scratch-r16
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Configuration file passed to the exporter.
## This metrics configuration makes the exporter emit only the health and state metrics configured below.
## In the config below, the metric key, name and gauge type should not be changed.
## @param dataplatform.exporter.config [string] Data Platform Metrics Configuration emitted in Prometheus format
##
config: |
{
"blueprintName": "bp2",
"metrics": [
{
"name": "zookeeper_desired_nodes",
"type": "gauge",
"helpMessage": "Desired number of zookeeper nodes in the data platform",
"key": "zookeeper",
"dataComponent": "DesiredNodes"
},
{
"name": "zookeeper_available_nodes",
"type": "gauge",
"helpMessage": "Available number of zookeeper nodes in the data platform",
"key": "zookeeper",
"dataComponent": "AvailableNodes"
},
{
"name": "kafka_desired_nodes",
"type": "gauge",
"helpMessage": "Desired number of kafka nodes in the data platform",
"key": "kafka",
"dataComponent": "DesiredNodes"
},
{
"name": "kafka_available_nodes",
"type": "gauge",
"helpMessage": "Available number of kafka nodes in the data platform",
"key": "kafka",
"dataComponent": "AvailableNodes"
},
{
"name": "elasticsearch_master_desired_nodes",
"type": "gauge",
"helpMessage": "Desired number of elasticsearch master nodes in the data platform",
"key": "elasticsearch-master",
"dataComponent": "DesiredNodes"
},
{
"name": "elasticsearch_master_available_nodes",
"type": "gauge",
"helpMessage": "Available number of elasticsearch master nodes in the data platform",
"key": "elasticsearch-master",
"dataComponent": "AvailableNodes"
},
{
"name": "elasticsearch_data_desired_nodes",
"type": "gauge",
"helpMessage": "Desired number of elasticsearch data nodes in the data platform",
"key": "elasticsearch-data",
"dataComponent": "DesiredNodes"
},
{
"name": "elasticsearch_data_available_nodes",
"type": "gauge",
"helpMessage": "Available number of elasticsearch data nodes in the data platform",
"key": "elasticsearch-data",
"dataComponent": "AvailableNodes"
},
{
"name": "spark_master_desired_nodes",
"type": "gauge",
"helpMessage": "Desired number of spark master nodes in the data platform",
"key": "spark-master",
"dataComponent": "DesiredNodes"
},
{
"name": "spark_master_available_nodes",
"type": "gauge",
"helpMessage": "Available number of spark master nodes in the data platform",
"key": "spark-master",
"dataComponent": "AvailableNodes"
},
{
"name": "spark_worker_desired_nodes",
"type": "gauge",
"helpMessage": "Desired number of spark worker nodes in the data platform",
"key": "spark-worker",
"dataComponent": "DesiredNodes"
},
{
"name": "spark_worker_available_nodes",
"type": "gauge",
"helpMessage": "Available number of spark worker nodes in the data platform",
"key": "spark-worker",
"dataComponent": "AvailableNodes"
},
{
"name": "logstash_desired_nodes",
"type": "gauge",
"helpMessage": "Desired number of logstash nodes in the data platform",
"key": "logstash",
"dataComponent": "DesiredNodes"
},
{
"name": "logstash_available_nodes",
"type": "gauge",
"helpMessage": "Available number of logstash nodes in the data platform",
"key": "logstash",
"dataComponent": "AvailableNodes"
}
]
}
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param dataplatform.exporter.livenessProbe.enabled Enable livenessProbe
## @param dataplatform.exporter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param dataplatform.exporter.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param dataplatform.exporter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param dataplatform.exporter.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param dataplatform.exporter.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 15
failureThreshold: 15
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param dataplatform.exporter.readinessProbe.enabled Enable readinessProbe
## @param dataplatform.exporter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param dataplatform.exporter.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param dataplatform.exporter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param dataplatform.exporter.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param dataplatform.exporter.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 15
failureThreshold: 15
successThreshold: 15
## Configure extra options for startup probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-startup-probes/#configure-probes
## @param dataplatform.exporter.startupProbe.enabled Enable startupProbe
## @param dataplatform.exporter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param dataplatform.exporter.startupProbe.periodSeconds Period seconds for startupProbe
## @param dataplatform.exporter.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param dataplatform.exporter.startupProbe.failureThreshold Failure threshold for startupProbe
## @param dataplatform.exporter.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 15
failureThreshold: 15
## Note: Kubernetes requires successThreshold to be 1 for startup probes
successThreshold: 1
## @param dataplatform.exporter.containerPorts.http Data Platform Prometheus exporter port
##
containerPorts:
http: 9090
## @param dataplatform.exporter.priorityClassName exporter priorityClassName
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## @param dataplatform.exporter.command Override Data Platform Exporter entrypoint (array)
##
command: []
## @param dataplatform.exporter.args Arguments for the provided command if needed
##
args: []
## Exporter resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param dataplatform.exporter.resources.limits The resources limits for the container
## @param dataplatform.exporter.resources.requests The requested resources for the container
##
resources:
## Example:
## limits:
## cpu: 200m
## memory: 256Mi
##
limits: {}
## Examples:
## requests:
## cpu: 200m
## memory: 10Mi
##
requests: {}
## dataplatform exporter containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param dataplatform.exporter.containerSecurityContext.enabled Enable Data Platform exporter containers' Security Context
## @param dataplatform.exporter.containerSecurityContext.runAsUser User ID for the containers.
## @param dataplatform.exporter.containerSecurityContext.runAsNonRoot Enable Data Platform exporter containers' Security Context runAsNonRoot
##
containerSecurityContext:
enabled: true
runAsUser: 1001
runAsNonRoot: true
## dataplatform exporter pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param dataplatform.exporter.podSecurityContext.enabled Enable Data Platform exporter pods' Security Context
## @param dataplatform.exporter.podSecurityContext.fsGroup Group ID for the pods.
##
podSecurityContext:
enabled: true
fsGroup: 1001
## @param dataplatform.exporter.podAffinityPreset Data Platform exporter pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param dataplatform.exporter.podAntiAffinityPreset Data Platform exporter pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param dataplatform.exporter.nodeAffinityPreset.type Data Platform exporter node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param dataplatform.exporter.nodeAffinityPreset.key Data Platform exporter node label key to match. Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param dataplatform.exporter.nodeAffinityPreset.values Data Platform exporter node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param dataplatform.exporter.affinity Affinity settings for exporter pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param dataplatform.exporter.nodeSelector Node labels for exporter pods assignment. Evaluated as a template
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param dataplatform.exporter.tolerations Tolerations for exporter pods assignment. Evaluated as a template
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
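## E.g. (illustrative toleration; key and value are placeholders):
## tolerations:
##   - key: "dedicated"
##     operator: "Equal"
##     value: "dataplatform"
##     effect: "NoSchedule"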
##
tolerations: []
## @param dataplatform.exporter.podLabels Additional labels for Metrics exporter pod
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param dataplatform.exporter.podAnnotations Additional annotations for Metrics exporter pod
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param dataplatform.exporter.customLivenessProbe Override default liveness probe
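## E.g. (illustrative override; the path shown is a placeholder, not necessarily the exporter's actual endpoint):
## customLivenessProbe:
##   httpGet:
##     path: /
##     port: http
##   initialDelaySeconds: 30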
##
customLivenessProbe: {}
## @param dataplatform.exporter.customReadinessProbe Override default readiness probe
##
customReadinessProbe: {}
## @param dataplatform.exporter.customStartupProbe Override default startup probe
##
customStartupProbe: {}
## Update strategy
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
## @param dataplatform.exporter.updateStrategy.type Update strategy - only really applicable for deployments with RWO PVs attached
## @param dataplatform.exporter.updateStrategy.rollingUpdate Deployment rolling update configuration parameters
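## E.g. (illustrative; use when an RWO PV would block a rolling update):
## updateStrategy:
##   type: Recreate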
##
updateStrategy:
type: RollingUpdate
rollingUpdate: {}
## @param dataplatform.exporter.extraEnvVars Additional environment variables to set
## Example:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param dataplatform.exporter.extraEnvVarsCM ConfigMap with extra environment variables
##
extraEnvVarsCM: ""
## @param dataplatform.exporter.extraEnvVarsSecret Secret with extra environment variables
##
extraEnvVarsSecret: ""
## @param dataplatform.exporter.extraVolumes Extra volumes to add to the deployment
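## E.g. (illustrative volume backed by a hypothetical ConfigMap):
## extraVolumes:
##   - name: extra-config
##     configMap:
##       name: my-extra-config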
##
extraVolumes: []
## @param dataplatform.exporter.extraVolumeMounts Extra volume mounts to add to the container
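## E.g. (illustrative; the mount path is arbitrary):
## extraVolumeMounts:
##   - name: extra-config
##     mountPath: /opt/extra-config
##     readOnly: true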
##
extraVolumeMounts: []
## @param dataplatform.exporter.initContainers Add init containers to the Data Platform exporter pods
## Example:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
initContainers: []
## @param dataplatform.exporter.sidecars Add sidecars to the Data Platform exporter pods
## Example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## Service for the Data Platform exporter deployment
##
service:
## @param dataplatform.exporter.service.type Service type for default Data Platform Prometheus exporter service
##
type: ClusterIP
## @param dataplatform.exporter.service.annotations [object] Exporter service annotations
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9090"
prometheus.io/path: "/metrics"
## @param dataplatform.exporter.service.labels Additional labels for Data Platform exporter service
##
labels: {}
## @param dataplatform.exporter.service.ports.http Kubernetes Service port
##
ports:
http: 9090
## @param dataplatform.exporter.service.loadBalancerIP Load balancer IP for the Data Platform Exporter Service (optional, cloud specific)
## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
loadBalancerIP: ""
## @param dataplatform.exporter.service.nodePorts.http Node ports for the HTTP exporter service
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
##
nodePorts:
http: ""
## @param dataplatform.exporter.service.loadBalancerSourceRanges Exporter Load Balancer Source ranges
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param dataplatform.exporter.hostAliases Deployment pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
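## E.g. (illustrative entries):
## hostAliases:
##   - ip: "10.0.0.1"
##     hostnames:
##       - exporter.local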
##
hostAliases: []
emitter:
## @param dataplatform.emitter.enabled Start Data Platform metrics emitter
##
enabled: true
## Data Platform BP2 emitter image
## ref: https://hub.docker.com/r/bitnami/dataplatform-emitter/tags/
## @param dataplatform.emitter.image.registry Data Platform emitter image registry
## @param dataplatform.emitter.image.repository Data Platform emitter image repository
## @param dataplatform.emitter.image.tag Data Platform emitter image tag (immutable tags are recommended)
## @param dataplatform.emitter.image.pullPolicy Data Platform emitter image pull policy
## @param dataplatform.emitter.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/dataplatform-emitter
tag: 1.0.1-scratch-r17
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param dataplatform.emitter.livenessProbe.enabled Enable livenessProbe
## @param dataplatform.emitter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param dataplatform.emitter.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param dataplatform.emitter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param dataplatform.emitter.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param dataplatform.emitter.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 15
failureThreshold: 15
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param dataplatform.emitter.readinessProbe.enabled Enable readinessProbe
## @param dataplatform.emitter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param dataplatform.emitter.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param dataplatform.emitter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param dataplatform.emitter.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param dataplatform.emitter.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 15
failureThreshold: 15
successThreshold: 15
## Configure extra options for startup probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-startup-probes/#configure-probes
## @param dataplatform.emitter.startupProbe.enabled Enable startupProbe
## @param dataplatform.emitter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param dataplatform.emitter.startupProbe.periodSeconds Period seconds for startupProbe
## @param dataplatform.emitter.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param dataplatform.emitter.startupProbe.failureThreshold Failure threshold for startupProbe
## @param dataplatform.emitter.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 15
failureThreshold: 15
## Note: Kubernetes requires successThreshold to be 1 for startup probes
successThreshold: 1
## @param dataplatform.emitter.containerPorts.http Data Platform emitter port
##
containerPorts:
http: 8091
## @param dataplatform.emitter.priorityClassName emitter priorityClassName
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## @param dataplatform.emitter.command Override Data Platform Emitter entrypoint (array)
##
command: []
## @param dataplatform.emitter.args Arguments for the provided command if needed
##
args: []
## Data Platform metrics emitter resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param dataplatform.emitter.resources.limits The resources limits for the container
## @param dataplatform.emitter.resources.requests The requested resources for the container
##
resources:
## Example:
## limits:
## cpu: 200m
## memory: 256Mi
##
limits: {}
## Examples:
## requests:
## cpu: 200m
## memory: 10Mi
##
requests: {}
## Data Platform emitter containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param dataplatform.emitter.containerSecurityContext.enabled Enable Data Platform emitter containers' Security Context
## @param dataplatform.emitter.containerSecurityContext.runAsUser User ID for the containers.
## @param dataplatform.emitter.containerSecurityContext.runAsNonRoot Enable Data Platform emitter containers' Security Context runAsNonRoot
##
containerSecurityContext:
enabled: true
runAsUser: 1001
runAsNonRoot: true
## Data Platform emitter pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param dataplatform.emitter.podSecurityContext.enabled Enable Data Platform emitter pods' Security Context
## @param dataplatform.emitter.podSecurityContext.fsGroup Group ID for the pods.
##
podSecurityContext:
enabled: true
fsGroup: 1001
## @param dataplatform.emitter.podAffinityPreset Data Platform emitter pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param dataplatform.emitter.podAntiAffinityPreset Data Platform emitter pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param dataplatform.emitter.nodeAffinityPreset.type Data Platform emitter node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param dataplatform.emitter.nodeAffinityPreset.key Data Platform emitter node label key to match. Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param dataplatform.emitter.nodeAffinityPreset.values Data Platform emitter node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param dataplatform.emitter.affinity Affinity settings for emitter pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param dataplatform.emitter.nodeSelector Node labels for emitter pods assignment. Evaluated as a template
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param dataplatform.emitter.tolerations Tolerations for emitter pods assignment. Evaluated as a template
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param dataplatform.emitter.podLabels Additional labels for Metrics emitter pod
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param dataplatform.emitter.podAnnotations Additional annotations for Metrics emitter pod
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param dataplatform.emitter.customLivenessProbe Override default liveness probe
##
customLivenessProbe: {}
## @param dataplatform.emitter.customReadinessProbe Override default readiness probe
##
customReadinessProbe: {}
## @param dataplatform.emitter.customStartupProbe Override default startup probe
##
customStartupProbe: {}
## Update strategy
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
## @param dataplatform.emitter.updateStrategy.type Update strategy - only really applicable for deployments with RWO PVs attached
## @param dataplatform.emitter.updateStrategy.rollingUpdate Deployment rolling update configuration parameters
##
updateStrategy:
type: RollingUpdate
rollingUpdate: {}
## @param dataplatform.emitter.extraEnvVars Additional environment variables to set
## Example:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param dataplatform.emitter.extraEnvVarsCM ConfigMap with extra environment variables
##
extraEnvVarsCM: ""
## @param dataplatform.emitter.extraEnvVarsSecret Secret with extra environment variables
##
extraEnvVarsSecret: ""
## @param dataplatform.emitter.extraVolumes Extra volumes to add to the deployment
##
extraVolumes: []
## @param dataplatform.emitter.extraVolumeMounts Extra volume mounts to add to the container
##
extraVolumeMounts: []
## @param dataplatform.emitter.initContainers Add init containers to the Data Platform emitter pods
## Example:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
initContainers: []
## @param dataplatform.emitter.sidecars Add sidecars to the Data Platform emitter pods
## Example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## Service for the Data Platform emitter deployment
##
service:
## @param dataplatform.emitter.service.type Service type for default Data Platform metrics emitter service
##
type: ClusterIP
## @param dataplatform.emitter.service.annotations Annotations for Data Platform emitter service
##
annotations: {}
## @param dataplatform.emitter.service.labels Additional labels for Data Platform emitter service
##
labels: {}
## @param dataplatform.emitter.service.ports.http Kubernetes Service port
##
ports:
http: 8091
## @param dataplatform.emitter.service.loadBalancerIP Load balancer IP for the dataplatform emitter Service (optional, cloud specific)
## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
loadBalancerIP: ""
## @param dataplatform.emitter.service.nodePorts.http Node ports for the HTTP emitter service
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
##
nodePorts:
http: ""
## @param dataplatform.emitter.service.loadBalancerSourceRanges Data Platform Emitter Load Balancer Source ranges
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param dataplatform.emitter.hostAliases Deployment pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @section Kafka parameters
##
kafka:
## @param kafka.enabled Enable Kafka subchart
##
enabled: true
## @param kafka.replicaCount Number of Kafka brokers
##
replicaCount: 3
## @param kafka.heapOpts Kafka Java Heap size
##
heapOpts: -Xmx4096m -Xms4096m
## Recommended values for cpu and memory requests
## @param kafka.resources.limits Resource limits for Kafka
## @param kafka.resources.requests.cpu CPU capacity request for Kafka nodes
## @param kafka.resources.requests.memory Memory capacity request for Kafka nodes
##
resources:
limits: {}
requests:
cpu: 250m
memory: 5120Mi
## Anti Affinity rules set for resiliency and Affinity rules set for optimal performance
## @param kafka.affinity.podAntiAffinity [object] Kafka anti affinity rules
## @skip kafka.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
## @param kafka.affinity.podAffinity [object] Kafka affinity rules
## @skip kafka.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution
##
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- kafka
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
podAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- zookeeper
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## Prometheus Exporters / Metrics
##
metrics:
## Prometheus Kafka Exporter: exposes metrics complementary to the JMX Exporter
##
kafka:
## @param kafka.metrics.kafka.enabled Enable prometheus exporter for Kafka
##
enabled: false
## Prometheus Kafka Exporter's resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param kafka.metrics.kafka.resources.limits Resource limits for the Kafka Prometheus exporter
## @param kafka.metrics.kafka.resources.requests.cpu CPU capacity request for the Kafka Prometheus exporter
## @param kafka.metrics.kafka.resources.requests.memory Memory capacity request for the Kafka Prometheus exporter
##
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
## Service configuration
## @param kafka.metrics.kafka.service.port Kafka Exporter Prometheus port to be used in wavefront configuration
##
service:
port: 9308
## Prometheus JMX Exporter: exposes the majority of Kafka's metrics
##
jmx:
## @param kafka.metrics.jmx.enabled Enable JMX exporter for Kafka
##
enabled: false
## Prometheus JMX Exporter's resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param kafka.metrics.jmx.resources.limits Resource limits for the Kafka JMX exporter
## @param kafka.metrics.jmx.resources.requests.cpu CPU capacity request for the Kafka JMX exporter
## @param kafka.metrics.jmx.resources.requests.memory Memory capacity request for the Kafka JMX exporter
##
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
## Service configuration
## @param kafka.metrics.jmx.service.port JMX Prometheus exporter service port
## @param kafka.metrics.jmx.service.annotations [object] Exporter service annotations
##
service:
port: 5556
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "5556"
prometheus.io/path: "/metrics"
prometheus.io/prefix: "kafkajmx."
## Zookeeper parameters
##
zookeeper:
## @param kafka.zookeeper.enabled Enable the Kafka subchart's Zookeeper
##
enabled: true
## @param kafka.zookeeper.replicaCount Number of Zookeeper nodes
##
replicaCount: 3
## @param kafka.zookeeper.heapSize Size in MB for the Java Heap options (Xmx and Xms) in Zookeeper. This env var is ignored if Xmx and Xms are configured via JVMFLAGS
##
heapSize: 4096
## Recommended values for cpu and memory requests
## @param kafka.zookeeper.resources.limits Resource limits for zookeeper
## @param kafka.zookeeper.resources.requests.cpu CPU capacity request for zookeeper
## @param kafka.zookeeper.resources.requests.memory Memory capacity request for zookeeper
##
resources:
limits: {}
requests:
cpu: 250m
memory: 5Gi
## Anti Affinity rules set for resiliency
## @param kafka.zookeeper.affinity.podAntiAffinity [object] Zookeeper pod anti affinity rules
## @skip kafka.zookeeper.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
##
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- zookeeper
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## These values are only used when kafka.zookeeper.enabled is set to false.
##
externalZookeeper:
## Server or list of external Zookeeper servers to use instead of the Zookeeper deployed as part of this chart
## @param kafka.externalZookeeper.servers Array of external Zookeeper servers
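## E.g. (illustrative hostnames):
## servers:
##   - zookeeper-0.zookeeper-headless.svc.cluster.local:2181
##   - zookeeper-1.zookeeper-headless.svc.cluster.local:2181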
##
servers: []
## @section Spark parameters
##
spark:
## @param spark.enabled Enable Spark subchart
##
enabled: true
## Spark master specific configuration
## @param spark.master.webPort Web port for spark master
## @param spark.master.resources.limits Spark master resource limits
## @param spark.master.resources.requests.cpu Spark master CPUs
## @param spark.master.resources.requests.memory Spark master requested memory
## @param spark.master.affinity.podAntiAffinity [object] Anti affinity rules set for resiliency
## @skip spark.master.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
##
master:
## Spark container ports
##
webPort: 8080
resources:
## Recommended values for cpu and memory requests
##
limits: {}
requests:
cpu: 250m
memory: 5Gi
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- worker
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## Spark worker specific configuration
## @param spark.worker.replicaCount Number of spark workers
## @param spark.worker.webPort Web port for spark worker
## @param spark.worker.resources.limits Spark worker resource limits
## @param spark.worker.resources.requests.cpu Spark worker CPUs
## @param spark.worker.resources.requests.memory Spark worker requested memory
## @param spark.worker.affinity.podAntiAffinity [object] Anti affinity rules set for resiliency
## @skip spark.worker.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
##
worker:
replicaCount: 2
## Spark container ports
##
webPort: 8081
resources:
## Recommended values for cpu and memory requests
##
limits: {}
requests:
cpu: 250m
memory: 5Gi
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- worker
- master
- key: app.kubernetes.io/name
operator: In
values:
- spark
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## Metrics configuration
## @param spark.metrics.enabled Enable Prometheus exporter for Spark
## @param spark.metrics.masterAnnotations [object] Annotations for Spark master exporter
## @param spark.metrics.workerAnnotations [object] Annotations for Spark worker exporter
##
metrics:
enabled: false
## Annotations for the Prometheus metrics on master nodes
##
masterAnnotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics/"
prometheus.io/port: "8080"
prometheus.io/prefix: "spark."
## Annotations for the Prometheus metrics on worker nodes
##
workerAnnotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics/"
prometheus.io/port: "8081"
prometheus.io/prefix: "spark."
## @section Elasticsearch parameters
##
elasticsearch:
## @param elasticsearch.enabled Enable Elasticsearch
##
enabled: true
## @param elasticsearch.global.kibanaEnabled Enable Kibana
##
global:
kibanaEnabled: true
## Elasticsearch master-eligible node parameters
## @param elasticsearch.master.replicas Number of Elasticsearch master-eligible replicas
## @param elasticsearch.master.heapSize Heap Size for Elasticsearch master
## @param elasticsearch.master.resources.limits Elasticsearch master resource limits
## @param elasticsearch.master.resources.requests.cpu Elasticsearch master CPUs
## @param elasticsearch.master.resources.requests.memory Elasticsearch master requested memory
##
master:
## Number of master-eligible node(s) replicas to deploy
##
replicas: 3
heapSize: 768m
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
## @param elasticsearch.master.affinity.podAntiAffinity [object] Anti affinity rules set for resiliency
## @skip elasticsearch.master.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
##
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- master
- key: app.kubernetes.io/name
operator: In
values:
- elasticsearch
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## Elasticsearch master-eligible container's resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube.
##
limits: {}
requests:
cpu: 250m
memory: 1Gi
## Elasticsearch data node parameters
## @param elasticsearch.data.name Elasticsearch data node name
## @param elasticsearch.data.replicas Number of Elasticsearch data node replicas
## @param elasticsearch.data.heapSize Heap Size for Elasticsearch data node
## @param elasticsearch.data.affinity.podAntiAffinity [object] Anti affinity rules set for resiliency
## @skip elasticsearch.data.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
## @param elasticsearch.data.resources.limits Elasticsearch data node resource limits
## @param elasticsearch.data.resources.requests.cpu Elasticsearch data node CPUs
## @param elasticsearch.data.resources.requests.memory Elasticsearch data node requested memory
##
data:
name: data
## Number of data node(s) replicas to deploy
##
replicas: 2
heapSize: 4096m
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- data
- key: app.kubernetes.io/name
operator: In
values:
- elasticsearch
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## Elasticsearch data container's resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
limits: {}
requests:
cpu: 250m
memory: 5Gi
## Elasticsearch coordinating-only node parameters
## @param elasticsearch.coordinating.replicas Number of Elasticsearch coordinating-only replicas
## @param elasticsearch.coordinating.heapSize Heap Size for Elasticsearch coordinating
## @param elasticsearch.coordinating.affinity.podAntiAffinity [object] Anti affinity rules set for resiliency
## @skip elasticsearch.coordinating.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
## @param elasticsearch.coordinating.resources.limits Elasticsearch coordinating resource limits
## @param elasticsearch.coordinating.resources.requests.cpu Elasticsearch coordinating CPUs
## @param elasticsearch.coordinating.resources.requests.memory Elasticsearch coordinating requested memory
##
coordinating:
## Number of coordinating-only node(s) replicas to deploy
##
replicas: 2
heapSize: 768m
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- coordinating-only
- key: app.kubernetes.io/name
operator: In
values:
- elasticsearch
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## Elasticsearch coordinating-only container's resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube.
##
limits: {}
requests:
cpu: 250m
memory: 1Gi
## Elasticsearch Prometheus exporter configuration
## ref: https://hub.docker.com/r/bitnami/elasticsearch-exporter/tags/
##
## @param elasticsearch.metrics.enabled Enable Prometheus exporter for Elasticsearch
## @param elasticsearch.metrics.resources.limits Elasticsearch metrics resource limits
## @param elasticsearch.metrics.resources.requests.cpu Elasticsearch metrics CPUs
## @param elasticsearch.metrics.resources.requests.memory Elasticsearch metrics requested memory
## @param elasticsearch.metrics.service.annotations [object] Elasticsearch metrics service annotations
##
metrics:
enabled: false
## Elasticsearch Prometheus exporter resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
##
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
service:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9114"
## @section Logstash parameters
##
logstash:
## @param logstash.enabled Enable Logstash
##
enabled: true
## Number of Logstash replicas to deploy
## @param logstash.replicaCount Number of Logstash replicas
##
replicaCount: 2
## Affinity for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
## @param logstash.affinity.podAntiAffinity [object] Logstash pod anti affinity
## @skip logstash.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
##
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- logstash
- key: app.kubernetes.io/instance
operator: In
values:
- "{{ .Release.Name }}"
topologyKey: "kubernetes.io/hostname"
## @param logstash.extraEnvVars [array] Array containing extra env vars to configure Logstash
## For example:
## extraEnvVars:
## - name: ELASTICSEARCH_HOST
## value: "x.y.z"
##
extraEnvVars:
- name: LS_JAVA_OPTS
value: "-Xmx1g -Xms1g"
## Logstash containers' resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param logstash.resources.limits Logstash resource limits
## @param logstash.resources.requests.cpu Logstash CPUs
## @param logstash.resources.requests.memory Logstash requested memory
##
resources:
limits: {}
requests:
cpu: 250m
memory: 1500Mi
## Prometheus metrics
##
metrics:
## @param logstash.metrics.enabled Enable metrics for logstash
##
enabled: false
## Logstash Prometheus Exporter containers' resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param logstash.metrics.resources.limits Logstash Prometheus exporter resource limits
## @param logstash.metrics.resources.requests.cpu Logstash Prometheus exporter CPUs
## @param logstash.metrics.resources.requests.memory Logstash Prometheus exporter requested memory
##
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
## @param logstash.metrics.service.port Logstash Prometheus port
##
service:
port: 9198
## @section Tanzu Observability (Wavefront) parameters
##
wavefront:
## @param wavefront.enabled Enable Tanzu Observability Framework
##
enabled: false
## This is a unique name for the cluster (required)
## All metrics will receive a `cluster` tag with this value
## @param wavefront.clusterName Cluster name
##
clusterName: KUBERNETES_CLUSTER_NAME
## Wavefront URL (cluster) and API Token (required)
## @param wavefront.wavefront.url Tanzu Observability cluster URL
## @param wavefront.wavefront.token Tanzu Observability access token
## @param wavefront.wavefront.existingSecret Tanzu Observability existing secret
##
wavefront:
url: https://YOUR_CLUSTER.wavefront.com
token: YOUR_API_TOKEN
## Name of an existing secret containing the token
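## E.g. existingSecret: "wavefront-api-token" (illustrative secret name)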
##
existingSecret: ""
## The Wavefront Collector is responsible for collecting all Kubernetes metrics from your cluster.
## It captures the Kubernetes resource metrics available from the kubelets
## and provides auto-discovery capabilities.
## @param wavefront.collector.resources.limits Wavefront collector metrics resource limits
## @param wavefront.collector.resources.requests.cpu Wavefront collector metrics CPUs
## @param wavefront.collector.resources.requests.memory Wavefront collector metrics requested memory
##
collector:
## Rules based discovery configuration
## Ref: https://github.com/wavefrontHQ/wavefront-kubernetes-collector/blob/master/docs/discovery.md
##
resources:
limits: {}
requests:
cpu: 200m
memory: 10Mi
discovery:
## @param wavefront.collector.discovery.enabled Rules based and Prometheus endpoints auto-discovery
##
enabled: true
## @param wavefront.collector.discovery.enableRuntimeConfigs Enable runtime discovery rules
## Ref: https://github.com/wavefrontHQ/wavefront-collector-for-kubernetes/blob/master/docs/discovery.md#runtime-configurations
##
enableRuntimeConfigs: true
## @param wavefront.collector.discovery.config [array] Configuration for rules based auto-discovery
##
## Example:
## config:
## - name: kafka-discovery
## type: prometheus
## selectors:
## images:
## - "*bitnami/kafka-exporter*"
## port: 9308
## path: /metrics
## scheme: http
## - name: kafka-jmx-discovery
## type: prometheus
## selectors:
## images:
## - "*bitnami/jmx-exporter*"
## port: 5556
## path: /metrics
## scheme: http
## prefix: kafkajmx.
## - name: elasticsearch-discovery
## type: prometheus
## selectors:
## images:
## - "*bitnami/elasticsearch-exporter*"
## port: 9114
## path: /metrics
## scheme: http
## - name: logstash-discovery
## type: prometheus
## selectors:
## images:
## - "*bitnami/logstash-exporter*"
## port: 9198
## path: /metrics
## scheme: http
## - name: spark-worker-discovery
## type: prometheus
## selectors:
## images:
## - "*bitnami/spark*"
## port: 8081
## path: /metrics/
## scheme: http
## prefix: spark.
## - name: spark-master-discovery
## type: prometheus
## selectors:
## images:
## - "*bitnami/spark*"
## port: 8080
## path: /metrics/
## scheme: http
## prefix: spark.
##
config: []
proxy:
## Wavefront Proxy resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param wavefront.proxy.resources.limits Wavefront Proxy metrics resource limits
## @param wavefront.proxy.resources.requests.cpu Wavefront Proxy metrics CPUs
## @param wavefront.proxy.resources.requests.memory Wavefront Proxy metrics requested memory
##
resources:
limits: {}
requests:
cpu: 100m
memory: 5Gi