diff --git a/.github/workflows/cd-pipeline.yml b/.github/workflows/cd-pipeline.yml
index 71eb833a20..d86234d22e 100644
--- a/.github/workflows/cd-pipeline.yml
+++ b/.github/workflows/cd-pipeline.yml
@@ -87,6 +87,7 @@ on: # rebuild any PRs and main branch changes
- 'bitnami/postgresql/**'
- 'bitnami/postgresql-ha/**'
- 'bitnami/prestashop/**'
+ - 'bitnami/prometheus/**'
- 'bitnami/pytorch/**'
- 'bitnami/rabbitmq-cluster-operator/**'
- 'bitnami/rabbitmq/**'
diff --git a/.vib/prometheus/cypress/cypress.env.json b/.vib/prometheus/cypress/cypress.env.json
new file mode 100644
index 0000000000..c79753f916
--- /dev/null
+++ b/.vib/prometheus/cypress/cypress.env.json
@@ -0,0 +1,18 @@
+{
+ "deployments": {
+ "alertmanager": {
+ "query": "alertmanager_alerts"
+ },
+ "prometheus": {
+ "query": "prometheus_http_requests_total"
+ }
+ },
+ "targets": {
+ "alertmanager": {
+ "replicaCount": 2
+ },
+ "prometheus": {
+ "replicaCount": 1
+ }
+ }
+}
diff --git a/.vib/prometheus/cypress/cypress.json b/.vib/prometheus/cypress/cypress.json
new file mode 100644
index 0000000000..7f8fff3de7
--- /dev/null
+++ b/.vib/prometheus/cypress/cypress.json
@@ -0,0 +1,4 @@
+{
+ "baseUrl": "http://localhost:8080",
+ "defaultCommandTimeout": 30000
+}
diff --git a/.vib/prometheus/cypress/cypress/integration/prometheus_spec.js b/.vib/prometheus/cypress/cypress/integration/prometheus_spec.js
new file mode 100644
index 0000000000..f1fc4637f4
--- /dev/null
+++ b/.vib/prometheus/cypress/cypress/integration/prometheus_spec.js
@@ -0,0 +1,25 @@
+///
+
+it('allows executing a query and displaying response data for each deployment', () => {
+ const deployments = Cypress.env('deployments');
+
+ cy.visit(`/graph`);
+ Object.keys(deployments).forEach((podName, i) => {
+ const query = Object.values(deployments)[i].query;
+
+ cy.get('[role="textbox"]').clear({force: true}).type(`${query}{enter}`,{delay: 100});
+ cy.contains('Execute').click();
+ cy.contains('.data-table', `container="${podName}"`)
+ })
+});
+
+it('checks targets status', () => {
+ const targets = Cypress.env('targets');
+
+ Object.keys(targets).forEach((podName, i) => {
+ const podData = Object.values(targets)[i];
+
+ cy.visit(`/targets?search=${podName}`);
+ cy.contains(`${podData.replicaCount}/${podData.replicaCount} up`);
+ })
+});
diff --git a/.vib/prometheus/goss/goss.yaml b/.vib/prometheus/goss/goss.yaml
new file mode 100644
index 0000000000..f87628f0e7
--- /dev/null
+++ b/.vib/prometheus/goss/goss.yaml
@@ -0,0 +1,73 @@
+command:
+  check-no-capabilities:
+    exec: cat /proc/1/status
+    exit-status: 0
+    stdout:
+    - "CapInh: 0000000000000000"
+    - "CapPrm: 0000000000000000"
+    - "CapEff: 0000000000000000"
+    - "CapBnd: 0000000000000000"
+    - "CapAmb: 0000000000000000"
+  {{- $uid := .Vars.server.podSecurityContext.runAsUser }}
+  {{- $gid := .Vars.server.podSecurityContext.fsGroup }}
+  check-user-info:
+    # The UID and GID should always be either the one specified as vars (always a bigger number that the default)
+    # or the one randomly defined by openshift (larger values). Otherwise, the chart is still using the default value.
+    exec: if [ $(id -u) -lt {{ $uid }} ] || [ $(id -G | awk '{print $2}') -lt {{ $gid }} ]; then exit 1; fi
+    exit-status: 0
+  {{ if .Vars.server.serviceAccount.automountServiceAccountToken }}
+  check-sa:
+    exec: cat /var/run/secrets/kubernetes.io/serviceaccount/token | cut -d '.' -f 2 | xargs -I '{}' echo '{}====' | fold -w 4 | sed '$ d' | tr -d '\n' | base64 -d
+    exit-status: 0
+    stdout:
+    - /serviceaccount.*name.*{{ .Env.BITNAMI_APP_NAME }}/
+  {{ end }}
+  check-config-files:
+    exec: promtool check config /opt/bitnami/prometheus/conf/{{ .Vars.server.existingConfigmapKey }}
+    exit-status: 0
+    stdout:
+    - SUCCESS
+  check-metrics:
+    exec: promtool query instant http://localhost:{{ .Vars.server.containerPorts.http }} prometheus_http_requests_total
+    exit-status: 0
+    stdout:
+    - "/-/healthy"
+    - "/-/ready"
+file:
+  /opt/bitnami/prometheus/conf/{{ .Vars.server.existingConfigmapKey }}:
+    exists: true
+    contains:
+    - "job_name: alertmanager"
+    - "{{ (first .Vars.server.extraScrapeConfigs).job_name }}"
+  /opt/bitnami/prometheus/conf/rules.yaml:
+    exists: true
+    contains:
+    - "{{ (first .Vars.server.alertingRules.groups).name }}"
+  {{ .Vars.server.persistence.mountPath }}:
+    exists: true
+    filetype: directory
+    mode: "2775"
+    owner: root
+  /proc/1/cmdline:
+    exists: true
+    contains:
+    - "--enable-feature={{ .Vars.server.enableFeatures | first }}"
+http:
+  http://localhost:{{ .Vars.server.containerPorts.http }}/-/ready:
+    status: 200
+    body:
+    - "Prometheus Server is Ready."
+  http://localhost:{{ .Vars.server.containerPorts.http }}/-/healthy:
+    status: 200
+    body:
+    - "Prometheus Server is Healthy."
+  {{- if .Vars.alertmanager.enabled }}
+  http://prometheus-alertmanager:{{ .Vars.alertmanager.service.ports.http }}/-/healthy:
+    status: 200
+    body:
+    - "OK"
+  http://localhost:{{ .Vars.server.containerPorts.http }}/api/v1/rules?name={{ (first .Vars.server.alertingRules.groups).name }}:
+    status: 200
+    body:
+    - "{{ (first (first .Vars.server.alertingRules.groups).rules).annotations.summary }}"
+  {{- end }}
diff --git a/.vib/prometheus/runtime-parameters.yaml b/.vib/prometheus/runtime-parameters.yaml
new file mode 100644
index 0000000000..2aeafbbe3a
--- /dev/null
+++ b/.vib/prometheus/runtime-parameters.yaml
@@ -0,0 +1,70 @@
+# Runtime parameters used by the VIB pipeline: applied when installing the
+# chart and also exposed as template variables (.Vars) to the goss tests.
+volumePermissions:
+  enabled: true
+server:
+  replicaCount: 1
+  serviceAccount:
+    create: true
+    automountServiceAccountToken: true
+  podSecurityContext:
+    enabled: true
+    runAsUser: 1002
+    fsGroup: 1002
+  containerSecurityContext:
+    enabled: true
+    capabilities:
+      drop:
+        - ALL
+    runAsNonRoot: true
+    readOnlyRootFilesystem: false
+    runAsUser: 1002
+  rbac:
+    create: true
+  persistence:
+    enabled: true
+    mountPath: /opt/bitnami/prometheus/data
+  service:
+    type: LoadBalancer
+    ports:
+      http: 80
+  enableFeatures: ["memory-snapshot-on-shutdown"]
+  containerPorts:
+    http: 8080
+  existingConfigmapKey: test.yaml
+  extraScrapeConfigs:
+    - job_name: wordpress
+      kubernetes_sd_configs:
+        - role: endpoints
+          namespaces:
+            names:
+              - default
+      metrics_path: /metrics
+      relabel_configs:
+        - source_labels:
+            - job
+          target_label: __tmp_wordpress_job_name
+        - action: keep
+          source_labels:
+            - __meta_kubernetes_service_label_app_kubernetes_io_instance
+            - __meta_kubernetes_service_labelpresent_app_kubernetes_io_instance
+          regex: (wordpress);true
+  alertingRules:
+    groups:
+      - name: example
+        rules:
+          - alert: Example
+            expr: count(prometheus_http_requests_total) > 2
+            for: 10m
+            labels:
+              severity: page
+            annotations:
+              summary: High number of requests to prometheus
+alertmanager:
+  enabled: true
+  replicaCount: 2
+  serviceMonitor:
+    enabled: true
+  service:
+    ports:
+      http: 9095
diff --git a/.vib/prometheus/vib-publish.json b/.vib/prometheus/vib-publish.json
new file mode 100644
index 0000000000..7c536af754
--- /dev/null
+++ b/.vib/prometheus/vib-publish.json
@@ -0,0 +1,85 @@
+{
+ "phases": {
+ "package": {
+ "context": {
+ "resources": {
+ "url": "{SHA_ARCHIVE}",
+ "path": "/bitnami/prometheus"
+ }
+ },
+ "actions": [
+ {
+ "action_id": "helm-package"
+ },
+ {
+ "action_id": "helm-lint"
+ }
+ ]
+ },
+ "verify": {
+ "context": {
+ "resources": {
+ "url": "{SHA_ARCHIVE}",
+ "path": "/bitnami/prometheus"
+ },
+ "target_platform": {
+ "target_platform_id": "{VIB_ENV_TARGET_PLATFORM}",
+ "size": {
+ "name": "S4"
+ }
+ }
+ },
+ "actions": [
+ {
+ "action_id": "health-check",
+ "params": {
+ "endpoint": "lb-prometheus-server-http",
+ "app_protocol": "HTTP"
+ }
+ },
+ {
+ "action_id": "cypress",
+ "params": {
+ "resources": {
+ "path": "/.vib/prometheus/cypress"
+ },
+ "endpoint": "lb-prometheus-server-http",
+ "app_protocol": "HTTP"
+ }
+ },
+ {
+ "action_id": "goss",
+ "params": {
+ "resources": {
+ "path": "/.vib"
+ },
+ "tests_file": "prometheus/goss/goss.yaml",
+ "vars_file": "prometheus/runtime-parameters.yaml",
+ "remote": {
+ "pod": {
+ "workload": "deploy-prometheus-server"
+ }
+ }
+ }
+ }
+ ]
+ },
+ "publish": {
+ "actions": [
+ {
+ "action_id": "helm-publish",
+ "params": {
+ "repository": {
+ "kind": "S3",
+ "url": "{VIB_ENV_S3_URL}",
+ "authn": {
+ "access_key_id": "{VIB_ENV_S3_ACCESS_KEY_ID}",
+ "secret_access_key": "{VIB_ENV_S3_SECRET_ACCESS_KEY}"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+}
diff --git a/.vib/prometheus/vib-verify.json b/.vib/prometheus/vib-verify.json
new file mode 100644
index 0000000000..f644c339f0
--- /dev/null
+++ b/.vib/prometheus/vib-verify.json
@@ -0,0 +1,68 @@
+{
+ "phases": {
+ "package": {
+ "context": {
+ "resources": {
+ "url": "{SHA_ARCHIVE}",
+ "path": "/bitnami/prometheus"
+ }
+ },
+ "actions": [
+ {
+ "action_id": "helm-package"
+ },
+ {
+ "action_id": "helm-lint"
+ }
+ ]
+ },
+ "verify": {
+ "context": {
+ "resources": {
+ "url": "{SHA_ARCHIVE}",
+ "path": "/bitnami/prometheus"
+ },
+ "target_platform": {
+ "target_platform_id": "{VIB_ENV_TARGET_PLATFORM}",
+ "size": {
+ "name": "S4"
+ }
+ }
+ },
+ "actions": [
+ {
+ "action_id": "health-check",
+ "params": {
+ "endpoint": "lb-prometheus-server-http",
+ "app_protocol": "HTTP"
+ }
+ },
+ {
+ "action_id": "cypress",
+ "params": {
+ "resources": {
+ "path": "/.vib/prometheus/cypress"
+ },
+ "endpoint": "lb-prometheus-server-http",
+ "app_protocol": "HTTP"
+ }
+ },
+ {
+ "action_id": "goss",
+ "params": {
+ "resources": {
+ "path": "/.vib"
+ },
+ "tests_file": "prometheus/goss/goss.yaml",
+ "vars_file": "prometheus/runtime-parameters.yaml",
+ "remote": {
+ "pod": {
+ "workload": "deploy-prometheus-server"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+}
diff --git a/bitnami/prometheus/Chart.lock b/bitnami/prometheus/Chart.lock
new file mode 100644
index 0000000000..8b8670c513
--- /dev/null
+++ b/bitnami/prometheus/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ version: 2.2.5
+digest: sha256:318f438acfeaced11d9060877d615caf1985417d2865810defaa886d3496f8d3
+generated: "2023-05-04T15:49:37.865565+02:00"
diff --git a/bitnami/prometheus/Chart.yaml b/bitnami/prometheus/Chart.yaml
new file mode 100644
index 0000000000..8d43ed799f
--- /dev/null
+++ b/bitnami/prometheus/Chart.yaml
@@ -0,0 +1,26 @@
+annotations:
+ category: Analytics
+ licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.42.0
+dependencies:
+ - name: common
+ repository: oci://registry-1.docker.io/bitnamicharts
+ tags:
+ - bitnami-common
+ version: 2.x.x
+description: Prometheus is an open source monitoring and alerting system. It enables sysadmins to monitor their infrastructures by collecting metrics from configured targets at given intervals.
+home: https://github.com/prometheus/prometheus
+icon: https://bitnami.com/assets/stacks/prometheus/img/prometheus-stack-220x234.png
+keywords:
+ - prometheus
+ - monitoring
+maintainers:
+ - name: Bitnami
+ url: https://github.com/bitnami/charts
+name: prometheus
+sources:
+ - https://github.com/bitnami/containers/tree/main/bitnami/prometheus
+ - https://github.com/prometheus/prometheus
+ - https://github.com/prometheus-community/helm-charts
+version: 0.1.0
diff --git a/bitnami/prometheus/README.md b/bitnami/prometheus/README.md
new file mode 100644
index 0000000000..ef353f0fdf
--- /dev/null
+++ b/bitnami/prometheus/README.md
@@ -0,0 +1,675 @@
+
+
+# Prometheus packaged by Bitnami
+
+Prometheus is an open source monitoring and alerting system. It enables sysadmins to monitor their infrastructures by collecting metrics from configured targets at given intervals.
+
+[Overview of Prometheus](https://prometheus.io/)
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/prometheus
+```
+
+## Introduction
+
+Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads.
+
+This chart bootstraps a [Prometheus](https://prometheus.io) Deployment in a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+[Learn more about the default configuration of the chart](https://docs.bitnami.com/kubernetes/infrastructure/prometheus/get-started/).
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+- ReadWriteMany volumes for deployment scaling
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/prometheus
+```
+
+The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| ------------------------- | ----------------------------------------------- | ----- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+
+### Common parameters
+
+| Name | Description | Value |
+| ------------------------ | --------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.name | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname | `""` |
+| `namespaceOverride` | String to fully override common.names.namespace | `""` |
+| `commonLabels` | Labels to add to all deployed objects | `{}` |
+| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
+| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` |
+| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
+| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
+
+### Alertmanager Parameters
+
+| Name | Description | Value |
+| -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------- |
+| `alertmanager.enabled` | Alertmanager enabled | `true` |
+| `alertmanager.image.registry` | Alertmanager image registry | `docker.io` |
+| `alertmanager.image.repository` | Alertmanager image repository | `bitnami/alertmanager` |
+| `alertmanager.image.tag` | Alertmanager image tag (immutable tags are recommended) | `0.25.0-debian-11-r48` |
+| `alertmanager.image.digest` | Alertmanager image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` |
+| `alertmanager.image.pullPolicy` | Alertmanager image pull policy | `IfNotPresent` |
+| `alertmanager.image.pullSecrets` | Alertmanager image pull secrets | `[]` |
+| `alertmanager.configuration`                                   | Alertmanager configuration. This content will be stored in the alertmanager.yaml file and the content can be a template.                                | `""`                            |
+| `alertmanager.replicaCount` | Number of Alertmanager replicas to deploy | `1` |
+| `alertmanager.containerPorts.http` | Alertmanager HTTP container port | `9093` |
+| `alertmanager.containerPorts.cluster` | Alertmanager Cluster HA port | `9094` |
+| `alertmanager.livenessProbe.enabled` | Enable livenessProbe on Alertmanager containers | `true` |
+| `alertmanager.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
+| `alertmanager.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` |
+| `alertmanager.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `3` |
+| `alertmanager.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
+| `alertmanager.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `alertmanager.readinessProbe.enabled` | Enable readinessProbe on Alertmanager containers | `true` |
+| `alertmanager.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `alertmanager.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `alertmanager.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `2` |
+| `alertmanager.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
+| `alertmanager.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `alertmanager.startupProbe.enabled` | Enable startupProbe on Alertmanager containers | `false` |
+| `alertmanager.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `2` |
+| `alertmanager.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
+| `alertmanager.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `2` |
+| `alertmanager.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` |
+| `alertmanager.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `alertmanager.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `alertmanager.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `alertmanager.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `alertmanager.resources.limits` | The resources limits for the Alertmanager containers | `{}` |
+| `alertmanager.resources.requests` | The requested resources for the Alertmanager containers | `{}` |
+| `alertmanager.podSecurityContext.enabled` | Enabled Alertmanager pods' Security Context | `true` |
+| `alertmanager.podSecurityContext.fsGroup` | Set Alertmanager pod's Security Context fsGroup | `1001` |
+| `alertmanager.containerSecurityContext.enabled` | Enabled Alertmanager containers' Security Context | `true` |
+| `alertmanager.containerSecurityContext.runAsUser` | Set Alertmanager containers' Security Context runAsUser | `1001` |
+| `alertmanager.containerSecurityContext.runAsNonRoot` | Set Alertmanager containers' Security Context runAsNonRoot | `true` |
+| `alertmanager.containerSecurityContext.readOnlyRootFilesystem` | Set Alertmanager containers' Security Context readOnlyRootFilesystem                                                                                    | `false`                         |
+| `alertmanager.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Alertmanager | `""` |
+| `alertmanager.existingConfigmapKey` | The name of the key with the Alertmanager config file | `""` |
+| `alertmanager.command` | Override default container command (useful when using custom images) | `[]` |
+| `alertmanager.args` | Override default container args (useful when using custom images) | `[]` |
+| `alertmanager.extraArgs` | Additional arguments passed to the Prometheus server container | `[]` |
+| `alertmanager.hostAliases` | Alertmanager pods host aliases | `[]` |
+| `alertmanager.podLabels` | Extra labels for Alertmanager pods | `{}` |
+| `alertmanager.podAnnotations` | Annotations for Alertmanager pods | `{}` |
+| `alertmanager.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `alertmanager.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `alertmanager.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
+| `alertmanager.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` |
+| `alertmanager.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
+| `alertmanager.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `alertmanager.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` |
+| `alertmanager.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` |
+| `alertmanager.affinity` | Affinity for Alertmanager pods assignment | `{}` |
+| `alertmanager.nodeSelector` | Node labels for Alertmanager pods assignment | `{}` |
+| `alertmanager.tolerations` | Tolerations for Alertmanager pods assignment | `[]` |
+| `alertmanager.updateStrategy.type` | Alertmanager statefulset strategy type | `RollingUpdate` |
+| `alertmanager.podManagementPolicy` | Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join | `OrderedReady` |
+| `alertmanager.priorityClassName` | Alertmanager pods' priorityClassName | `""` |
+| `alertmanager.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `alertmanager.schedulerName` | Name of the k8s scheduler (other than default) for Alertmanager pods | `""` |
+| `alertmanager.terminationGracePeriodSeconds`                   | Seconds Alertmanager pod needs to terminate gracefully                                                                                                  | `""`                            |
+| `alertmanager.lifecycleHooks` | for the Alertmanager container(s) to automate configuration before or after startup | `{}` |
+| `alertmanager.extraEnvVars` | Array with extra environment variables to add to Alertmanager nodes | `[]` |
+| `alertmanager.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Alertmanager nodes | `""` |
+| `alertmanager.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Alertmanager nodes | `""` |
+| `alertmanager.extraVolumes` | Optionally specify extra list of additional volumes for the Alertmanager pod(s) | `[]` |
+| `alertmanager.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Alertmanager container(s) | `[]` |
+| `alertmanager.sidecars` | Add additional sidecar containers to the Alertmanager pod(s) | `[]` |
+| `alertmanager.initContainers` | Add additional init containers to the Alertmanager pod(s) | `[]` |
+| `alertmanager.ingress.enabled` | Enable ingress record generation for Alertmanager | `false` |
+| `alertmanager.ingress.pathType` | Ingress path type | `ImplementationSpecific` |
+| `alertmanager.ingress.hostname` | Default host for the ingress record | `alertmanager.prometheus.local` |
+| `alertmanager.ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` |
+| `alertmanager.ingress.path` | Default path for the ingress record | `/` |
+| `alertmanager.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
+| `alertmanager.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
+| `alertmanager.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
+| `alertmanager.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
+| `alertmanager.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
+| `alertmanager.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
+| `alertmanager.ingress.secrets` | Custom TLS certificates as secrets | `[]` |
+| `alertmanager.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
+| `alertmanager.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
+| `alertmanager.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `alertmanager.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` |
+| `alertmanager.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
+| `alertmanager.service.type` | Alertmanager service type | `LoadBalancer` |
+| `alertmanager.service.ports.http` | Alertmanager service HTTP port | `80` |
+| `alertmanager.service.ports.cluster` | Alertmanager cluster HA port | `9094` |
+| `alertmanager.service.nodePorts.http` | Node port for HTTP | `""` |
+| `alertmanager.service.clusterIP` | Alertmanager service Cluster IP | `""` |
+| `alertmanager.service.loadBalancerIP` | Alertmanager service Load Balancer IP | `""` |
+| `alertmanager.service.loadBalancerSourceRanges` | Alertmanager service Load Balancer sources | `[]` |
+| `alertmanager.service.externalTrafficPolicy` | Alertmanager service external traffic policy | `Cluster` |
+| `alertmanager.service.annotations` | Additional custom annotations for Alertmanager service | `{}` |
+| `alertmanager.service.extraPorts` | Extra ports to expose in Alertmanager service (normally used with the `sidecars` value) | `[]` |
+| `alertmanager.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `alertmanager.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `alertmanager.persistence.enabled` | Enable Alertmanager data persistence using VolumeClaimTemplates | `false` |
+| `alertmanager.persistence.mountPath` | Path to mount the volume at. | `/bitnami/alertmanager/data` |
+| `alertmanager.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services | `""` |
+| `alertmanager.persistence.storageClass` | PVC Storage Class for Concourse worker data volume | `""` |
+| `alertmanager.persistence.accessModes` | PVC Access Mode for Concourse worker volume | `["ReadWriteOnce"]` |
+| `alertmanager.persistence.size` | PVC Storage Request for Concourse worker volume | `8Gi` |
+| `alertmanager.persistence.annotations` | Annotations for the PVC | `{}` |
+| `alertmanager.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
+
+### Prometheus server Parameters
+
+| Name | Description | Value |
+| ----------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
+| `server.image.registry` | Prometheus image registry | `docker.io` |
+| `server.image.repository` | Prometheus image repository | `bitnami/prometheus` |
+| `server.image.tag` | Prometheus image tag (immutable tags are recommended) | `2.44.0-debian-11-r0` |
+| `server.image.digest` | Prometheus image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` |
+| `server.image.pullPolicy` | Prometheus image pull policy | `IfNotPresent` |
+| `server.image.pullSecrets` | Prometheus image pull secrets | `[]` |
+| `server.configuration`                                            | Prometheus configuration. This content will be stored in the prometheus.yaml file and the content can be a template.                                                        | `""`                       |
+| `server.alertingRules`                                            | Prometheus alerting rules. This content will be stored in the rules.yaml file and the content can be a template.                                                            | `{}`                       |
+| `server.extraScrapeConfigs`                                       | Prometheus configuration, useful to declare new scrape_configs. This content will be merged with the 'server.configuration' value and stored in the prometheus.yaml file.   | `[]`                       |
+| `server.replicaCount` | Number of Prometheus replicas to deploy | `1` |
+| `server.containerPorts.http` | Prometheus HTTP container port | `9090` |
+| `server.livenessProbe.enabled` | Enable livenessProbe on Prometheus containers | `true` |
+| `server.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
+| `server.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` |
+| `server.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `3` |
+| `server.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
+| `server.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `server.readinessProbe.enabled` | Enable readinessProbe on Prometheus containers | `true` |
+| `server.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `server.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `server.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `2` |
+| `server.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
+| `server.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `server.startupProbe.enabled` | Enable startupProbe on Prometheus containers | `false` |
+| `server.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `2` |
+| `server.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
+| `server.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `2` |
+| `server.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` |
+| `server.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `server.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `server.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `server.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `server.resources.limits` | The resources limits for the Prometheus containers | `{}` |
+| `server.resources.requests` | The requested resources for the Prometheus containers | `{}` |
+| `server.podSecurityContext.enabled` | Enabled Prometheus pods' Security Context | `true` |
+| `server.podSecurityContext.fsGroup` | Set Prometheus pod's Security Context fsGroup | `1001` |
+| `server.containerSecurityContext.enabled` | Enabled Prometheus containers' Security Context | `true` |
+| `server.containerSecurityContext.runAsUser` | Set Prometheus containers' Security Context runAsUser | `1001` |
+| `server.containerSecurityContext.runAsNonRoot` | Set Prometheus containers' Security Context runAsNonRoot | `true` |
+| `server.containerSecurityContext.readOnlyRootFilesystem`          | Set Prometheus containers' Security Context readOnlyRootFilesystem                                                                                                          | `false`                    |
+| `server.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Prometheus | `""` |
+| `server.existingConfigmapKey` | The name of the key with the Prometheus config file | `""` |
+| `server.command` | Override default container command (useful when using custom images) | `[]` |
+| `server.args` | Override default container args (useful when using custom images) | `[]` |
+| `server.extraArgs` | Additional arguments passed to the Prometheus server container | `[]` |
+| `server.hostAliases` | Prometheus pods host aliases | `[]` |
+| `server.podLabels` | Extra labels for Prometheus pods | `{}` |
+| `server.podAnnotations` | Annotations for Prometheus pods | `{}` |
+| `server.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `server.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `server.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
+| `server.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` |
+| `server.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
+| `server.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `server.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` |
+| `server.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` |
+| `server.affinity` | Affinity for Prometheus pods assignment | `{}` |
+| `server.nodeSelector` | Node labels for Prometheus pods assignment | `{}` |
+| `server.tolerations` | Tolerations for Prometheus pods assignment | `[]` |
+| `server.updateStrategy.type` | Prometheus deployment strategy type. If persistence is enabled, strategy type should be set to Recreate to avoid dead locks. | `RollingUpdate` |
+| `server.priorityClassName` | Prometheus pods' priorityClassName | `""` |
+| `server.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `server.schedulerName` | Name of the k8s scheduler (other than default) for Prometheus pods | `""` |
+| `server.terminationGracePeriodSeconds` | Seconds Prometheus pod needs to terminate gracefully | `""` |
+| `server.lifecycleHooks` | for the Prometheus container(s) to automate configuration before or after startup | `{}` |
+| `server.extraEnvVars` | Array with extra environment variables to add to Prometheus nodes | `[]` |
+| `server.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Prometheus nodes | `""` |
+| `server.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Prometheus nodes | `""` |
+| `server.extraVolumes` | Optionally specify extra list of additional volumes for the Prometheus pod(s) | `[]` |
+| `server.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Prometheus container(s) | `[]` |
+| `server.sidecars` | Add additional sidecar containers to the Prometheus pod(s) | `[]` |
+| `server.initContainers` | Add additional init containers to the Prometheus pod(s) | `[]` |
+| `server.routePrefix` | Prefix for the internal routes of web endpoints | `/` |
+| `server.remoteWrite` | The remote_write spec configuration for Prometheus | `[]` |
+| `server.scrapeInterval` | Interval between consecutive scrapes. Example: "1m" | `""` |
+| `server.scrapeTimeout` | Timeout after which a scrape is considered failed. Example: "10s" | `""` |
+| `server.evaluationInterval` | Interval between consecutive evaluations. Example: "1m" | `""` |
+| `server.enableAdminAPI` | Enable Prometheus administrative API | `false` |
+| `server.enableRemoteWriteReceiver` | Enable Prometheus to be used as a receiver for the Prometheus remote write protocol. | `false` |
+| `server.enableFeatures` | Enable access to Prometheus disabled features. | `[]` |
+| `server.logLevel` | Log level for Prometheus | `info` |
+| `server.logFormat` | Log format for Prometheus | `logfmt` |
+| `server.retention` | Metrics retention days | `10d` |
+| `server.retentionSize` | Maximum size of metrics | `0` |
+| `server.alertingEndpoints` | Alertmanagers to which alerts will be sent | `[]` |
+| `server.externalLabels` | External labels to add to any time series or alerts when communicating with external systems | `{}` |
+| `server.thanos.create` | Create a Thanos sidecar container | `false` |
+| `server.thanos.image.registry` | Thanos image registry | `docker.io` |
+| `server.thanos.image.repository` | Thanos image name | `bitnami/thanos` |
+| `server.thanos.image.tag` | Thanos image tag | `0.31.0-scratch-r3` |
+| `server.thanos.image.digest` | Thanos image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `server.thanos.image.pullPolicy` | Thanos image pull policy | `IfNotPresent` |
+| `server.thanos.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `server.thanos.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `server.thanos.containerSecurityContext.readOnlyRootFilesystem` | mount / (root) as a readonly filesystem | `false` |
+| `server.thanos.containerSecurityContext.allowPrivilegeEscalation` | Switch privilegeEscalation possibility on or off | `false` |
+| `server.thanos.containerSecurityContext.runAsNonRoot` | Force the container to run as a non root user | `true` |
+| `server.thanos.containerSecurityContext.capabilities.drop` | Linux Kernel capabilities which should be dropped | `[]` |
+| `server.thanos.prometheusUrl` | Override default prometheus url `http://localhost:9090` | `""` |
+| `server.thanos.extraArgs` | Additional arguments passed to the thanos sidecar container | `[]` |
+| `server.thanos.objectStorageConfig.secretName` | Support mounting a Secret for the objectStorageConfig of the sideCar container. | `""` |
+| `server.thanos.objectStorageConfig.secretKey` | Secret key with the configuration file. | `thanos.yaml` |
+| `server.thanos.extraVolumeMounts` | Additional volumeMounts from `server.volumes` for thanos sidecar container | `[]` |
+| `server.thanos.resources.limits` | The resources limits for the Thanos sidecar container | `{}` |
+| `server.thanos.resources.requests` | The resources requests for the Thanos sidecar container | `{}` |
+| `server.thanos.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `server.thanos.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `0` |
+| `server.thanos.livenessProbe.periodSeconds` | How often to perform the probe | `5` |
+| `server.thanos.livenessProbe.timeoutSeconds` | When the probe times out | `3` |
+| `server.thanos.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `120` |
+| `server.thanos.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
+| `server.thanos.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `server.thanos.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `0` |
+| `server.thanos.readinessProbe.periodSeconds` | How often to perform the probe | `5` |
+| `server.thanos.readinessProbe.timeoutSeconds` | When the probe times out | `3` |
+| `server.thanos.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `120` |
+| `server.thanos.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
+| `server.thanos.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `server.thanos.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `server.thanos.service.type` | Kubernetes service type | `ClusterIP` |
+| `server.thanos.service.ports.grpc` | Thanos service port | `10901` |
+| `server.thanos.service.clusterIP` | Specific cluster IP when service type is cluster IP. Use `None` to create headless service by default. | `None` |
+| `server.thanos.service.nodePorts.grpc` | Specify the nodePort value for the LoadBalancer and NodePort service types. | `""` |
+| `server.thanos.service.loadBalancerIP` | `loadBalancerIP` if service type is `LoadBalancer` | `""` |
+| `server.thanos.service.loadBalancerSourceRanges` | Address that are allowed when svc is `LoadBalancer` | `[]` |
+| `server.thanos.service.annotations` | Additional annotations for Prometheus service | `{}` |
+| `server.thanos.service.extraPorts` | Additional ports to expose from the Thanos sidecar container | `[]` |
+| `server.thanos.service.externalTrafficPolicy` | Prometheus service external traffic policy | `Cluster` |
+| `server.thanos.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
+| `server.thanos.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `server.thanos.ingress.enabled` | Enable ingress controller resource | `false` |
+| `server.thanos.ingress.pathType` | Ingress path type | `ImplementationSpecific` |
+| `server.thanos.ingress.hostname` | Default host for the ingress record | `thanos.prometheus.local` |
+| `server.thanos.ingress.path` | Default path for the ingress record | `/` |
+| `server.thanos.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
+| `server.thanos.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
+| `server.thanos.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
+| `server.thanos.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
+| `server.thanos.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
+| `server.thanos.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
+| `server.thanos.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
+| `server.thanos.ingress.secrets` | Custom TLS certificates as secrets | `[]` |
+| `server.thanos.ingress.extraRules` | The list of additional rules to be added to this ingress record. Evaluated as a template | `[]` |
+| `server.ingress.enabled` | Enable ingress record generation for Prometheus | `false` |
+| `server.ingress.pathType` | Ingress path type | `ImplementationSpecific` |
+| `server.ingress.hostname` | Default host for the ingress record | `server.prometheus.local` |
+| `server.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
+| `server.ingress.path` | Default path for the ingress record | `/` |
+| `server.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
+| `server.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
+| `server.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
+| `server.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
+| `server.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
+| `server.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
+| `server.ingress.secrets` | Custom TLS certificates as secrets | `[]` |
+| `server.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
+| `server.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
+| `server.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
+| `server.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` |
+| `server.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
+| `server.service.type` | Prometheus service type | `LoadBalancer` |
+| `server.service.ports.http` | Prometheus service HTTP port | `80` |
+| `server.service.nodePorts.http` | Node port for HTTP | `""` |
+| `server.service.clusterIP` | Prometheus service Cluster IP | `""` |
+| `server.service.loadBalancerIP` | Prometheus service Load Balancer IP | `""` |
+| `server.service.loadBalancerSourceRanges` | Prometheus service Load Balancer sources | `[]` |
+| `server.service.externalTrafficPolicy` | Prometheus service external traffic policy | `Cluster` |
+| `server.service.annotations` | Additional custom annotations for Prometheus service | `{}` |
+| `server.service.extraPorts` | Extra ports to expose in Prometheus service (normally used with the `sidecars` value) | `[]` |
+| `server.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin. ClientIP by default. | `ClientIP` |
+| `server.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
+| `server.persistence.enabled` | Enable persistence using Persistent Volume Claims. If you have multiple instances (server.replicaCount > 1), please consider using an external storage service like Thanos or Grafana Mimir | `false` |
+| `server.persistence.mountPath` | Path to mount the volume at. | `/bitnami/prometheus/data` |
+| `server.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services | `""` |
+| `server.persistence.storageClass` | Storage class of backing PVC | `""` |
+| `server.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
+| `server.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
+| `server.persistence.size` | Size of data volume | `8Gi` |
+| `server.persistence.existingClaim` | The name of an existing PVC to use for persistence | `""` |
+| `server.persistence.selector` | Selector to match an existing Persistent Volume for Prometheus data PVC | `{}` |
+| `server.persistence.dataSource` | Custom PVC data source | `{}` |
+| `server.rbac.create` | Specifies whether RBAC resources should be created | `true` |
+| `server.rbac.rules` | Custom RBAC rules to set | `[]` |
+
+### Init Container Parameters
+
+| Name | Description | Value |
+| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
+| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
+| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r99` |
+| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
+| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` |
+| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` |
+| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release --set alertmanager.enabled=true \
+ oci://registry-1.docker.io/bitnamicharts/prometheus
+```
+
+The above command installs the Prometheus chart with Alertmanager enabled.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/prometheus
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Deploy extra resources
+
+There are cases where you may want to deploy extra objects, such as a ConfigMap containing your app's configuration or some extra deployment with a micro service used by your app. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter.
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+### Integrate Prometheus and Alertmanager with Thanos
+
+You can integrate Prometheus & Alertmanager with Thanos using this chart and the [Bitnami Thanos chart](https://github.com/bitnami/charts/tree/main/bitnami/thanos) following the steps below:
+
+> Note: in this example we will use MinIO® (subchart) as the Objstore. Every component will be deployed in the "monitoring" namespace.
+
+- Create a **values.yaml** like the one below for Thanos:
+
+```yaml
+objstoreConfig: |-
+ type: s3
+ config:
+ bucket: thanos
+ endpoint: {{ include "thanos.minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:9000
+ access_key: minio
+ secret_key: minio123
+ insecure: true
+query:
+ dnsDiscovery:
+ sidecarsService: prometheus-thanos
+ sidecarsNamespace: monitoring
+bucketweb:
+ enabled: true
+compactor:
+ enabled: true
+storegateway:
+ enabled: true
+ruler:
+ enabled: true
+ alertmanagers:
+ - http://prometheus-alertmanager.monitoring.svc.cluster.local:9093
+ config: |-
+ groups:
+ - name: "metamonitoring"
+ rules:
+ - alert: "PrometheusDown"
+ expr: absent(up{prometheus="monitoring/prometheus"})
+metrics:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+minio:
+ enabled: true
+ auth:
+ rootPassword: minio123
+ rootUser: minio
+ monitoringBuckets: thanos
+ accessKey:
+ password: minio
+ secretKey:
+ password: minio123
+```
+
+- Install Prometheus and Thanos charts:
+
+For Helm 3:
+
+```console
+kubectl create namespace monitoring
+helm install prometheus \
+ --set prometheus.thanos.create=true \
+ --namespace monitoring \
+ oci://registry-1.docker.io/bitnamicharts/prometheus
+helm install thanos \
+ --values values.yaml \
+ --namespace monitoring \
+ oci://registry-1.docker.io/bitnamicharts/thanos
+```
+
+That's all! Now you have Thanos fully integrated with Prometheus and Alertmanager.
+
+### Integrate Prometheus with Grafana Mimir
+
+You can integrate Prometheus with Grafana Mimir using this chart and the [Bitnami Grafana Mimir chart](https://github.com/bitnami/charts/tree/main/bitnami/grafana-mimir) adding a `remoteWrite` entry:
+
+- Create a **values.yaml** like the one below for Prometheus:
+
+```yaml
+server:
+ remoteWrite:
+ - url: http://grafana-mimir-gateway.svc.cluster.local/api/v1/push
+ headers:
+ X-Scope-OrgID: demo
+```
+
+- Install Prometheus and Grafana Mimir charts:
+
+For Helm 3:
+
+```console
+kubectl create namespace monitoring
+helm install prometheus \
+ --values values.yaml \
+ --namespace monitoring \
+ oci://registry-1.docker.io/bitnamicharts/prometheus
+helm install grafana-mimir \
+ oci://registry-1.docker.io/bitnamicharts/grafana-mimir
+```
+
+That's all! Now you have Prometheus integrated with Grafana Mimir.
+
+### Integrate Prometheus with Grafana
+
+You can integrate Prometheus with Grafana Dashboard using this chart and the [Bitnami Grafana chart](https://github.com/bitnami/charts/tree/main/bitnami/grafana) by simply adding the Prometheus datasources:
+
+- Create a **values.yaml** like the one below for Grafana:
+
+```yaml
+datasources:
+ secretDefinition:
+ apiVersion: 1
+ datasources:
+ - name: Prometheus
+ type: prometheus
+ access: proxy
+ orgId: 1
+ url: http://prometheus.monitoring.svc.cluster.local
+ version: 1
+ editable: true
+ isDefault: true
+ - name: Alertmanager
+ uid: alertmanager
+ type: alertmanager
+ access: proxy
+ orgId: 1
+ url: http://prometheus-alertmanager.monitoring.svc.cluster.local:9093
+ version: 1
+ editable: true
+```
+
+- Install Prometheus and Grafana charts:
+
+For Helm 3:
+
+```console
+kubectl create namespace monitoring
+helm install prometheus \
+ --namespace monitoring \
+ oci://registry-1.docker.io/bitnamicharts/prometheus
+helm install grafana \
+ --values values.yaml \
+ --namespace monitoring \
+ oci://registry-1.docker.io/bitnamicharts/grafana
+```
+
+### How to add new targets
+
+By default this helm chart will monitor its own targets: prometheus and alertmanager. Additional ones can be added setting a list with the [scrape_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) in the value `server.extraScrapeConfigs`. Here is a simple example for wordpress (deployed in the default namespace):
+
+```yaml
+server:
+ extraScrapeConfigs:
+ - job_name: wordpress
+ kubernetes_sd_configs:
+ - role: endpoints
+ namespaces:
+ names:
+ - default
+ metrics_path: /metrics
+ relabel_configs:
+ - source_labels:
+ - job
+ target_label: __tmp_wordpress_job_name
+ - action: keep
+ source_labels:
+ - __meta_kubernetes_service_label_app_kubernetes_io_instance
+ - __meta_kubernetes_service_labelpresent_app_kubernetes_io_instance
+ regex: (wordpress);true
+ - action: keep
+ source_labels:
+ - __meta_kubernetes_service_label_app_kubernetes_io_name
+ - __meta_kubernetes_service_labelpresent_app_kubernetes_io_name
+ regex: (wordpress);true
+ - action: keep
+ source_labels:
+ - __meta_kubernetes_endpoint_port_name
+ regex: metrics
+ - source_labels:
+ - __meta_kubernetes_endpoint_address_target_kind
+ - __meta_kubernetes_endpoint_address_target_name
+ separator: ;
+ regex: Node;(.*)
+ replacement: ${1}
+ target_label: node
+ - source_labels:
+ - __meta_kubernetes_endpoint_address_target_kind
+ - __meta_kubernetes_endpoint_address_target_name
+ separator: ;
+ regex: Pod;(.*)
+ replacement: ${1}
+ target_label: pod
+ - source_labels:
+ - __meta_kubernetes_namespace
+ target_label: namespace
+ - source_labels:
+ - __meta_kubernetes_service_name
+ target_label: service
+ - source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - source_labels:
+ - __meta_kubernetes_pod_container_name
+ target_label: container
+ - action: drop
+ source_labels:
+ - __meta_kubernetes_pod_phase
+ regex: (Failed|Succeeded)
+ - source_labels:
+ - __meta_kubernetes_service_name
+ target_label: job
+ replacement: ${1}
+ - target_label: endpoint
+ replacement: metrics
+ - source_labels:
+ - __address__
+ target_label: __tmp_hash
+ modulus: 1
+ action: hashmod
+ - source_labels:
+ - __tmp_hash
+ regex: 0
+ action: keep
+```
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## License
+
+Copyright © 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/bitnami/prometheus/templates/NOTES.txt b/bitnami/prometheus/templates/NOTES.txt
new file mode 100644
index 0000000000..b82c5f4722
--- /dev/null
+++ b/bitnami/prometheus/templates/NOTES.txt
@@ -0,0 +1,123 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+ kubectl get pods --namespace {{ include "common.names.namespace" . | quote }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+ kubectl exec --namespace {{ include "common.names.namespace" . | quote }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+ /opt/bitnami/prometheus/bin/prometheus --config.file=/opt/bitnami/prometheus/conf/prometheus.yml --storage.tsdb.path=/opt/bitnami/prometheus/data --web.console.libraries=/opt/bitnami/prometheus/conf/console_libraries --web.console.templates=/opt/bitnami/prometheus/conf/consoles
+
+{{- else }}
+
+Prometheus can be accessed via port "{{ .Values.server.service.ports.http }}" on the following DNS name from within your cluster:
+
+ {{ template "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local
+
+To access Prometheus from outside the cluster execute the following commands:
+
+{{- if .Values.server.ingress.enabled }}
+
+ You should be able to access your new Prometheus installation through
+
+ {{ ternary "https" "http" .Values.server.ingress.tls }}://{{ .Values.server.ingress.hostname }}
+
+{{- else if contains "LoadBalancer" .Values.server.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "common.names.fullname" . }}'
+
+{{- $port:=.Values.server.service.ports.http | toString }}
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+ echo "Prometheus URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.server.service.ports.http }}{{ end }}/"
+
+{{- else if contains "ClusterIP" .Values.server.service.type }}
+
+ echo "Prometheus URL: http://127.0.0.1:9090/"
+ kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "common.names.fullname" . }} 9090:{{ .Values.server.service.ports.http }}
+
+{{- else if contains "NodePort" .Values.server.service.type }}
+
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo "Prometheus URL: http://$NODE_IP:$NODE_PORT/"
+
+{{- end }}
+
+{{- if .Values.server.thanos.create }}
+
+Thanos Sidecar can be accessed via port "{{ .Values.server.thanos.service.ports.grpc }}" on the following DNS name from within your cluster:
+
+ {{ template "prometheus.thanos-sidecar.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local
+
+{{- if .Values.server.thanos.ingress.enabled }}
+
+ You should be able to access your new Thanos Sidecar installation through
+
+ {{ ternary "https" "http" .Values.server.thanos.ingress.tls }}://{{ .Values.server.thanos.ingress.hostname }}
+
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- if .Values.alertmanager.enabled }}
+
+Watch the Alertmanager StatefulSet status using the command:
+
+ kubectl get sts -w --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/name={{ template "prometheus.alertmanager.fullname" . }},app.kubernetes.io/instance={{ .Release.Name }}
+
+Alertmanager can be accessed via port "{{ .Values.alertmanager.service.ports.http }}" on the following DNS name from within your cluster:
+
+ {{ template "prometheus.alertmanager.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local
+
+To access Alertmanager from outside the cluster execute the following commands:
+
+{{- if .Values.alertmanager.ingress.enabled }}
+
+ You should be able to access your new Alertmanager installation through
+
+ {{ ternary "https" "http" .Values.alertmanager.ingress.tls }}://{{ .Values.alertmanager.ingress.hostname }}
+
+{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "prometheus.alertmanager.fullname" . }}'
+
+{{- $port:=.Values.alertmanager.service.ports.http | toString }}
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "prometheus.alertmanager.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+ echo "Alertmanager URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.alertmanager.service.ports.http }}{{ end }}/"
+
+{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}
+
+ echo "Alertmanager URL: http://127.0.0.1:9093/"
+ kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "prometheus.alertmanager.fullname" . }} 9093:{{ .Values.alertmanager.service.ports.http }}
+
+{{- else if contains "NodePort" .Values.alertmanager.service.type }}
+
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo "Alertmanager URL: http://$NODE_IP:$NODE_PORT/"
+
+{{- end }}
+{{- end }}
+
+{{- include "common.warnings.rollingTag" .Values.server.image }}
+{{- include "common.warnings.rollingTag" .Values.server.thanos.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- include "prometheus.server.validateValues" . }}
diff --git a/bitnami/prometheus/templates/_helpers.tpl b/bitnami/prometheus/templates/_helpers.tpl
new file mode 100644
index 0000000000..60cdce9091
--- /dev/null
+++ b/bitnami/prometheus/templates/_helpers.tpl
@@ -0,0 +1,151 @@
+{{/*
+Return the proper image name
+*/}}
+{{- define "prometheus.server.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.server.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name
+*/}}
+{{- define "prometheus.alertmanager.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.alertmanager.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name
+*/}}
+{{- define "prometheus.server.thanosImage" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.server.thanos.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return Prometheus server name
+*/}}
+{{- define "prometheus.server.fullname" -}}
+ {{- printf "%s-server" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Return Prometheus server name
+*/}}
+{{- define "prometheus.server.fullname.namespace" -}}
+ {{- printf "%s-server" (include "common.names.fullname.namespace" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "prometheus.volumePermissions.image" -}}
+{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "prometheus.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.server.image .Values.volumePermissions.image .Values.server.thanos.image .Values.alertmanager.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "prometheus.server.serviceAccountName" -}}
+{{- if .Values.server.serviceAccount.create -}}
+ {{ default (include "prometheus.server.fullname" .) .Values.server.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.server.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "prometheus.server.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "prometheus.server.validateValues.thanosObjectStorageConfig" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate thanos objectStorageConfig.
+*/}}
+{{- define "prometheus.server.validateValues.thanosObjectStorageConfig" -}}
+{{- if (and .Values.server.thanos.objectStorageConfig (or (not (hasKey .Values.server.thanos.objectStorageConfig "secretKey")) (not (hasKey .Values.server.thanos.objectStorageConfig "secretName")) ))}}
+ {{- printf "'server.thanos.objectStorageConfig.secretKey' and 'server.thanos.objectStorageConfig.secretName' are mandatory" }}
+{{- end }}
+{{- end }}
+
+{{/*
+Get the Prometheus configuration configmap.
+*/}}
+{{- define "prometheus.server.configmapName" -}}
+{{- if .Values.server.existingConfigmap -}}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.existingConfigmap "context" .) -}}
+{{- else }}
+ {{- include "prometheus.server.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the Prometheus configuration configmap key.
+*/}}
+{{- define "prometheus.server.configmapKey" -}}
+{{- if .Values.server.existingConfigmapKey -}}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.existingConfigmapKey "context" .) -}}
+{{- else }}
+ {{- printf "prometheus.yaml" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the Prometheus Alertmanager configuration configmap key.
+*/}}
+{{- define "prometheus.alertmanager.configmapKey" -}}
+{{- if .Values.alertmanager.existingConfigmapKey -}}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.existingConfigmapKey "context" .) -}}
+{{- else }}
+ {{- printf "alertmanager.yaml" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use in alertmanager
+*/}}
+{{- define "prometheus.alertmanager.serviceAccountName" -}}
+{{- if .Values.alertmanager.serviceAccount.create -}}
+ {{ default (include "prometheus.alertmanager.fullname" .) .Values.alertmanager.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.alertmanager.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return Thanos sidecar service/ingress name
+*/}}
+{{- define "prometheus.thanos-sidecar.fullname" -}}
+ {{- printf "%s-thanos" (include "common.names.fullname" .) }}
+{{- end -}}
+
+{{/*
+Return Alertmanager name
+*/}}
+{{- define "prometheus.alertmanager.fullname" -}}
+ {{- printf "%s-alertmanager" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Get the Alertmanager configuration configmap.
+*/}}
+{{- define "prometheus.alertmanager.configmapName" -}}
+{{- if .Values.alertmanager.existingConfigmap -}}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.existingConfigmap "context" .) -}}
+{{- else }}
+ {{- include "prometheus.alertmanager.fullname" . -}}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/bitnami/prometheus/templates/_scrape_config.tpl b/bitnami/prometheus/templates/_scrape_config.tpl
new file mode 100644
index 0000000000..340da5cdae
--- /dev/null
+++ b/bitnami/prometheus/templates/_scrape_config.tpl
@@ -0,0 +1,81 @@
+{{/*
+Return the prometheus scrape configuration for kubernetes objects.
+Usage:
+{{ include "prometheus.scrape_config" (dict "component" "alertmanager" "context" $) }}
+*/}}
+{{- define "prometheus.scrape_config" -}}
+kubernetes_sd_configs:
+ - role: endpoints
+ namespaces:
+ names:
+ - {{ include "common.names.namespace" .context }}
+metrics_path: /metrics
+relabel_configs:
+ - source_labels:
+ - job
+ target_label: __tmp_prometheus_job_name
+ - action: keep
+ source_labels:
+ - __meta_kubernetes_service_label_app_kubernetes_io_component
+ - __meta_kubernetes_service_labelpresent_app_kubernetes_io_component
+ regex: ({{ .component }});true
+ - action: keep
+ source_labels:
+ - __meta_kubernetes_service_label_app_kubernetes_io_instance
+ - __meta_kubernetes_service_labelpresent_app_kubernetes_io_instance
+ regex: ({{ .context.Release.Name }});true
+ - action: keep
+ source_labels:
+ - __meta_kubernetes_service_label_app_kubernetes_io_name
+ - __meta_kubernetes_service_labelpresent_app_kubernetes_io_name
+ regex: (prometheus);true
+ - action: keep
+ source_labels:
+ - __meta_kubernetes_endpoint_port_name
+ regex: http
+ - source_labels:
+ - __meta_kubernetes_endpoint_address_target_kind
+ - __meta_kubernetes_endpoint_address_target_name
+ separator: ;
+ regex: Node;(.*)
+ replacement: ${1}
+ target_label: node
+ - source_labels:
+ - __meta_kubernetes_endpoint_address_target_kind
+ - __meta_kubernetes_endpoint_address_target_name
+ separator: ;
+ regex: Pod;(.*)
+ replacement: ${1}
+ target_label: pod
+ - source_labels:
+ - __meta_kubernetes_namespace
+ target_label: namespace
+ - source_labels:
+ - __meta_kubernetes_service_name
+ target_label: service
+ - source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - source_labels:
+ - __meta_kubernetes_pod_container_name
+ target_label: container
+ - action: drop
+ source_labels:
+ - __meta_kubernetes_pod_phase
+ regex: (Failed|Succeeded)
+ - source_labels:
+ - __meta_kubernetes_service_name
+ target_label: job
+ replacement: ${1}
+ - target_label: endpoint
+ replacement: http
+ - source_labels:
+ - __address__
+ target_label: __tmp_hash
+ modulus: 1
+ action: hashmod
+ - source_labels:
+ - __tmp_hash
+ regex: 0
+ action: keep
+{{- end -}}
\ No newline at end of file
diff --git a/bitnami/prometheus/templates/alertmanager/configmap.yaml b/bitnami/prometheus/templates/alertmanager/configmap.yaml
new file mode 100644
index 0000000000..61ca5da2c3
--- /dev/null
+++ b/bitnami/prometheus/templates/alertmanager/configmap.yaml
@@ -0,0 +1,19 @@
+{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.existingConfigmap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "prometheus.alertmanager.fullname" . | quote }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ {{ include "prometheus.alertmanager.configmapKey" . }}:
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.configuration "context" $) | toYaml | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/bitnami/prometheus/templates/alertmanager/ingress.yaml b/bitnami/prometheus/templates/alertmanager/ingress.yaml
new file mode 100644
index 0000000000..e71e0307f2
--- /dev/null
+++ b/bitnami/prometheus/templates/alertmanager/ingress.yaml
@@ -0,0 +1,64 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled }}
+apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+ name: {{ include "prometheus.alertmanager.fullname" . | quote }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.alertmanager.ingress.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.alertmanager.ingress.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.ingress.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ {{- if and .Values.alertmanager.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
+ ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName | quote }}
+ {{- end }}
+ rules:
+ {{- if .Values.alertmanager.ingress.hostname }}
+ - host: {{ .Values.alertmanager.ingress.hostname }}
+ http:
+ paths:
+ {{- if .Values.alertmanager.ingress.extraPaths }}
+ {{- toYaml .Values.alertmanager.ingress.extraPaths | nindent 10 }}
+ {{- end }}
+ - path: {{ .Values.alertmanager.ingress.path }}
+ {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
+ pathType: {{ .Values.alertmanager.ingress.pathType }}
+ {{- end }}
+          backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.alertmanager.fullname" .) "servicePort" "http" "context" $) | nindent 14 }}
+ {{- end }}
+ {{- range .Values.alertmanager.ingress.extraHosts }}
+ - host: {{ .name | quote }}
+ http:
+ paths:
+ - path: {{ default "/" .path }}
+ {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
+ pathType: {{ default "ImplementationSpecific" .pathType }}
+ {{- end }}
+ backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.alertmanager.fullname" $) "servicePort" "http" "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.alertmanager.ingress.extraRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.ingress.extraRules "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if or (and .Values.alertmanager.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.alertmanager.ingress.annotations )) .Values.alertmanager.ingress.selfSigned)) .Values.alertmanager.ingress.extraTls }}
+ tls:
+ {{- if and .Values.alertmanager.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.alertmanager.ingress.annotations )) .Values.alertmanager.ingress.selfSigned) }}
+ - hosts:
+ - {{ .Values.alertmanager.ingress.hostname | quote }}
+ secretName: {{ printf "%s-tls" .Values.alertmanager.ingress.hostname }}
+ {{- end }}
+ {{- if .Values.alertmanager.ingress.extraTls }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.ingress.extraTls "context" $) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/alertmanager/pdb.yaml b/bitnami/prometheus/templates/alertmanager/pdb.yaml
new file mode 100644
index 0000000000..58efea02c6
--- /dev/null
+++ b/bitnami/prometheus/templates/alertmanager/pdb.yaml
@@ -0,0 +1,28 @@
+{{- $replicaCount := int .Values.alertmanager.replicaCount }}
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.pdb.create (gt $replicaCount 1) }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "prometheus.alertmanager.fullname" . | quote }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.alertmanager.pdb.minAvailable }}
+ minAvailable: {{ .Values.alertmanager.pdb.minAvailable }}
+ {{- end }}
+ {{- if .Values.alertmanager.pdb.maxUnavailable }}
+ maxUnavailable: {{ .Values.alertmanager.pdb.maxUnavailable }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+{{- end }}
diff --git a/bitnami/prometheus/templates/alertmanager/service-account.yaml b/bitnami/prometheus/templates/alertmanager/service-account.yaml
new file mode 100644
index 0000000000..dad76fbeef
--- /dev/null
+++ b/bitnami/prometheus/templates/alertmanager/service-account.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "prometheus.alertmanager.serviceAccountName" . | quote }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.alertmanager.serviceAccount.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.alertmanager.serviceAccount.annotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.serviceAccount.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.alertmanager.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/alertmanager/service-headless.yaml b/bitnami/prometheus/templates/alertmanager/service-headless.yaml
new file mode 100644
index 0000000000..23080ac58b
--- /dev/null
+++ b/bitnami/prometheus/templates/alertmanager/service-headless.yaml
@@ -0,0 +1,40 @@
+{{- if and .Values.alertmanager.enabled (gt (int .Values.alertmanager.replicaCount) 1) }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ printf "%s-headless" (include "prometheus.alertmanager.fullname" .) }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.alertmanager.service.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.alertmanager.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.service.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-cluster
+ port: {{ .Values.alertmanager.service.ports.cluster }}
+ protocol: TCP
+ targetPort: tcp-cluster
+ - name: udp-cluster
+ port: {{ .Values.alertmanager.service.ports.cluster }}
+ protocol: UDP
+ targetPort: udp-cluster
+ {{- if .Values.alertmanager.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+{{- end }}
\ No newline at end of file
diff --git a/bitnami/prometheus/templates/alertmanager/service.yaml b/bitnami/prometheus/templates/alertmanager/service.yaml
new file mode 100644
index 0000000000..88d80c86b7
--- /dev/null
+++ b/bitnami/prometheus/templates/alertmanager/service.yaml
@@ -0,0 +1,58 @@
+{{- if .Values.alertmanager.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "prometheus.alertmanager.fullname" . | quote }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.alertmanager.service.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.alertmanager.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.service.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.alertmanager.service.type }}
+ {{- if and .Values.alertmanager.service.clusterIP (eq .Values.alertmanager.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.alertmanager.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.alertmanager.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.alertmanager.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if or (eq .Values.alertmanager.service.type "LoadBalancer") (eq .Values.alertmanager.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.alertmanager.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.alertmanager.service.type "LoadBalancer") (not (empty .Values.alertmanager.service.loadBalancerSourceRanges)) }}
+  loadBalancerSourceRanges: {{- toYaml .Values.alertmanager.service.loadBalancerSourceRanges | nindent 4 }}
+ {{- end }}
+ {{- if and (eq .Values.alertmanager.service.type "LoadBalancer") (not (empty .Values.alertmanager.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .Values.alertmanager.service.ports.http }}
+ protocol: TCP
+ targetPort: http
+ {{- if and (or (eq .Values.alertmanager.service.type "NodePort") (eq .Values.alertmanager.service.type "LoadBalancer")) (not (empty .Values.alertmanager.service.nodePorts.http)) }}
+ nodePort: {{ .Values.alertmanager.service.nodePorts.http }}
+ {{- else if eq .Values.alertmanager.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.alertmanager.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+{{- end }}
\ No newline at end of file
diff --git a/bitnami/prometheus/templates/alertmanager/statefulset.yaml b/bitnami/prometheus/templates/alertmanager/statefulset.yaml
new file mode 100644
index 0000000000..64be4a17dc
--- /dev/null
+++ b/bitnami/prometheus/templates/alertmanager/statefulset.yaml
@@ -0,0 +1,245 @@
+{{- if .Values.alertmanager.enabled }}
+{{- $clusterPort := .Values.alertmanager.containerPorts.cluster }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+ name: {{ include "prometheus.alertmanager.fullname" . | quote }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.alertmanager.replicaCount }}
+ podManagementPolicy: {{ .Values.alertmanager.podManagementPolicy | quote }}
+ selector:
+ matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ serviceName: {{ printf "%s-headless" (include "prometheus.alertmanager.fullname" .) }}
+ {{- if .Values.alertmanager.updateStrategy }}
+ updateStrategy: {{- toYaml .Values.alertmanager.updateStrategy | nindent 4 }}
+ {{- end }}
+ template:
+ metadata:
+ {{- if .Values.alertmanager.podAnnotations }}
+ annotations: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.podAnnotations "context" $) | nindent 8 }}
+ {{- end }}
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.alertmanager.podLabels }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.podLabels "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ spec:
+ serviceAccountName: {{ template "prometheus.alertmanager.serviceAccountName" . }}
+ {{- include "prometheus.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.alertmanager.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alertmanager.affinity }}
+ affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.alertmanager.podAffinityPreset "component" "alertmanager" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.alertmanager.podAntiAffinityPreset "component" "alertmanager" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.alertmanager.nodeAffinityPreset.type "key" .Values.alertmanager.nodeAffinityPreset.key "values" .Values.alertmanager.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.alertmanager.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alertmanager.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.tolerations "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alertmanager.priorityClassName }}
+ priorityClassName: {{ .Values.alertmanager.priorityClassName | quote }}
+ {{- end }}
+ {{- if .Values.alertmanager.schedulerName }}
+ schedulerName: {{ .Values.alertmanager.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.alertmanager.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.topologySpreadConstraints "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alertmanager.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.alertmanager.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alertmanager.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.alertmanager.terminationGracePeriodSeconds }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.volumePermissions.enabled .Values.alertmanager.persistence.enabled }}
+ - name: volume-permissions
+ image: {{ include "prometheus.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - sh
+ - -ec
+ - |
+ mkdir -p {{ .Values.alertmanager.persistence.mountPath }}
+ find {{ .Values.alertmanager.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.alertmanager.containerSecurityContext.runAsUser }}:{{ .Values.alertmanager.podSecurityContext.fsGroup }}
+ {{- if .Values.alertmanager.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.alertmanager.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: data
+ mountPath: {{ .Values.alertmanager.persistence.mountPath }}
+ {{- if .Values.alertmanager.persistence.subPath }}
+ subPath: {{ .Values.alertmanager.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.alertmanager.initContainers }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.initContainers "context" $) | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: alertmanager
+ image: {{ template "prometheus.alertmanager.image" . }}
+ imagePullPolicy: {{ .Values.alertmanager.image.pullPolicy }}
+ {{- if .Values.alertmanager.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.alertmanager.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.alertmanager.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.alertmanager.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.args "context" $) | nindent 12 }}
+ {{- else }}
+ args:
+ - "--config.file=/opt/bitnami/alertmanager/conf/{{ include "prometheus.alertmanager.configmapKey" . }}"
+ - "--storage.path=/opt/bitnami/alertmanager/data"
+ - "--web.listen-address=0.0.0.0:{{ .Values.alertmanager.containerPorts.http }}"
+ {{- if gt (int .Values.alertmanager.replicaCount) 1 }}
+ - "--cluster.advertise-address=[$(POD_IP)]:{{ $clusterPort }}"
+ - "--cluster.listen-address=0.0.0.0:{{ $clusterPort }}"
+ {{- $fullName := include "prometheus.alertmanager.fullname" . }}
+ {{- range $i := until (int .Values.alertmanager.replicaCount) }}
+ - "--cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:{{ $clusterPort }}"
+ {{- end }}
+ {{- end }}
+ {{- if .Values.alertmanager.extraArgs }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraArgs "context" $) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ {{- if .Values.alertmanager.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ envFrom:
+ {{- if .Values.alertmanager.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraEnvVarsCM "context" $) }}
+ {{- end }}
+ {{- if .Values.alertmanager.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraEnvVarsSecret "context" $) }}
+ {{- end }}
+ {{- if .Values.alertmanager.resources }}
+ resources: {{- toYaml .Values.alertmanager.resources | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.alertmanager.containerPorts.http }}
+ - name: tcp-cluster
+ containerPort: {{ $clusterPort }}
+ protocol: TCP
+ - name: udp-cluster
+ containerPort: {{ $clusterPort }}
+ protocol: UDP
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.alertmanager.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.alertmanager.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.alertmanager.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /-/healthy
+ port: http
+ {{- end }}
+ {{- if .Values.alertmanager.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.alertmanager.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.alertmanager.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /-/ready
+ port: http
+ {{- end }}
+ {{- if .Values.alertmanager.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.alertmanager.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.alertmanager.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: http
+ {{- end }}
+ {{- end }}
+ {{- if .Values.alertmanager.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: config
+ mountPath: /opt/bitnami/alertmanager/conf
+ readOnly: true
+ - name: data
+ mountPath: {{ .Values.alertmanager.persistence.mountPath }}
+ {{- if .Values.alertmanager.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.alertmanager.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.sidecars "context" $) | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ include "prometheus.alertmanager.configmapName" . }}
+ {{- if not .Values.alertmanager.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- end}}
+ {{- if .Values.alertmanager.extraVolumes }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraVolumes "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alertmanager.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ annotations:
+ {{- if .Values.alertmanager.persistence.annotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.persistence.annotations "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.alertmanager.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.alertmanager.persistence.size | quote }}
+ {{- if .Values.alertmanager.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.persistence.selector "context" $) | nindent 10 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.alertmanager.persistence "global" .Values.global) | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/extra-list.yaml b/bitnami/prometheus/templates/extra-list.yaml
new file mode 100644
index 0000000000..9ac65f9e16
--- /dev/null
+++ b/bitnami/prometheus/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/server/clusterrole.yaml b/bitnami/prometheus/templates/server/clusterrole.yaml
new file mode 100644
index 0000000000..5a8b9d6e13
--- /dev/null
+++ b/bitnami/prometheus/templates/server/clusterrole.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.server.rbac.create }}
+kind: ClusterRole
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ name: {{ include "prometheus.server.fullname.namespace" . }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+rules:
+ # Scrape-target read permissions, mirroring the upstream Prometheus RBAC examples
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ - nodes/proxy
+ - nodes/metrics
+ - services
+ - endpoints
+ - pods
+ - ingresses
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - "extensions"
+ - "networking.k8s.io"
+ resources:
+ - ingresses/status
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - nonResourceURLs:
+ - "/metrics"
+ verbs:
+ - get
+ {{- if .Values.server.rbac.rules }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.server.rbac.rules "context" $ ) | nindent 2 }}
+ {{- end }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/server/clusterrolebinding.yaml b/bitnami/prometheus/templates/server/clusterrolebinding.yaml
new file mode 100644
index 0000000000..88b22c9765
--- /dev/null
+++ b/bitnami/prometheus/templates/server/clusterrolebinding.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.server.rbac.create }}
+kind: ClusterRoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ name: {{ template "prometheus.server.fullname.namespace" . }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "prometheus.server.fullname.namespace" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "prometheus.server.serviceAccountName" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/server/configmap.yaml b/bitnami/prometheus/templates/server/configmap.yaml
new file mode 100644
index 0000000000..dd3ca42c05
--- /dev/null
+++ b/bitnami/prometheus/templates/server/configmap.yaml
@@ -0,0 +1,21 @@
+{{- if not .Values.existingConfigmap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "prometheus.server.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+data:
+ {{ include "prometheus.server.configmapKey" . }}:
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.configuration "context" $) | toYaml | nindent 4 }}
+ rules.yaml:
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.alertingRules "context" $) | toYaml | nindent 4 }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/server/deployment.yaml b/bitnami/prometheus/templates/server/deployment.yaml
new file mode 100644
index 0000000000..e18718fccf
--- /dev/null
+++ b/bitnami/prometheus/templates/server/deployment.yaml
@@ -0,0 +1,277 @@
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+ name: {{ template "prometheus.server.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.server.replicaCount }}
+ {{- if .Values.server.updateStrategy }}
+ strategy: {{- toYaml .Values.server.updateStrategy | nindent 4 }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ template:
+ metadata:
+ annotations:
+ checksum/configmap: {{ include (print $.Template.BasePath "/server/configmap.yaml") . | sha256sum }}
+ {{- if .Values.server.podAnnotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.podAnnotations "context" $) | nindent 8 }}
+ {{- end }}
+ labels: {{- include "common.labels.standard" . | nindent 8 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.server.podLabels }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.podLabels "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ spec:
+ serviceAccountName: {{ template "prometheus.server.serviceAccountName" . }}
+ {{- include "prometheus.imagePullSecrets" . | nindent 6 }}
+ {{- if .Values.server.hostAliases }}
+ hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.server.hostAliases "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.server.affinity }}
+ affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.server.affinity "context" $) | nindent 8 }}
+ {{- else }}
+ affinity:
+ podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.server.podAffinityPreset "component" "server" "context" $) | nindent 10 }}
+ podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.server.podAntiAffinityPreset "component" "server" "context" $) | nindent 10 }}
+ nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.server.nodeAffinityPreset.type "key" .Values.server.nodeAffinityPreset.key "values" .Values.server.nodeAffinityPreset.values) | nindent 10 }}
+ {{- end }}
+ {{- if .Values.server.nodeSelector }}
+ nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.server.nodeSelector "context" $) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.server.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.server.tolerations "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.server.priorityClassName }}
+ priorityClassName: {{ .Values.server.priorityClassName | quote }}
+ {{- end }}
+ {{- if .Values.server.schedulerName }}
+ schedulerName: {{ .Values.server.schedulerName | quote }}
+ {{- end }}
+ {{- if .Values.server.topologySpreadConstraints }}
+ topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.server.topologySpreadConstraints "context" .) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.server.podSecurityContext.enabled }}
+ securityContext: {{- omit .Values.server.podSecurityContext "enabled" | toYaml | nindent 8 }}
+ {{- end }}
+ {{- if .Values.server.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }}
+ {{- end }}
+ initContainers:
+ {{- if and .Values.volumePermissions.enabled .Values.server.persistence.enabled }}
+ - name: volume-permissions
+ image: {{ include "prometheus.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - sh
+ - -ec
+ - |
+ mkdir -p {{ .Values.server.persistence.mountPath }}
+ find {{ .Values.server.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.server.containerSecurityContext.runAsUser }}:{{ .Values.server.podSecurityContext.fsGroup }}
+ securityContext: {{- include "common.tplvalues.render" (dict "value" .Values.volumePermissions.containerSecurityContext "context" $) | nindent 12 }}
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: data
+ mountPath: {{ .Values.server.persistence.mountPath }}
+ {{- if .Values.server.persistence.subPath }}
+ subPath: {{ .Values.server.persistence.subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.server.initContainers }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.initContainers "context" $) | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: prometheus
+ image: {{ template "prometheus.server.image" . }}
+ imagePullPolicy: {{ .Values.server.image.pullPolicy }}
+ {{- if .Values.server.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.server.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+ {{- else if .Values.server.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.server.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.diagnosticMode.enabled }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+ {{- else if .Values.server.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" .Values.server.args "context" $) | nindent 12 }}
+ {{- else }}
+ args:
+ - "--config.file=/opt/bitnami/prometheus/conf/{{ include "prometheus.server.configmapKey" . }}"
+ - "--storage.tsdb.path={{ .Values.server.persistence.mountPath }}"
+ - "--storage.tsdb.retention.time={{ .Values.server.retention }}"
+ - "--storage.tsdb.retention.size={{ .Values.server.retentionSize }}"
+ - "--log.level={{ .Values.server.logLevel }}"
+ - "--log.format={{ .Values.server.logFormat }}"
+ - "--web.listen-address=:{{ .Values.server.containerPorts.http }}"
+ - "--web.console.libraries=/opt/bitnami/prometheus/conf/console_libraries"
+ - "--web.console.templates=/opt/bitnami/prometheus/conf/consoles"
+ {{- if .Values.server.enableAdminAPI }}
+ - "--web.enable-admin-api"
+ {{- end }}
+ {{- if .Values.server.enableRemoteWriteReceiver }}
+ - "--web.enable-remote-write-receiver"
+ {{- end }}
+ {{- if .Values.server.routePrefix }}
+ - "--web.route-prefix={{ .Values.server.routePrefix }}"
+ {{- end }}
+ {{- if .Values.server.enableFeatures }}
+ - "--enable-feature={{ join "," .Values.server.enableFeatures }}"
+ {{- end }}
+ {{- if .Values.server.extraArgs }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.extraArgs "context" $) | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ env:
+ {{- if .Values.server.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ envFrom:
+ {{- if .Values.server.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.server.extraEnvVarsCM "context" $) }}
+ {{- end }}
+ {{- if .Values.server.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ include "common.tplvalues.render" (dict "value" .Values.server.extraEnvVarsSecret "context" $) }}
+ {{- end }}
+ {{- if .Values.server.resources }}
+ resources: {{- toYaml .Values.server.resources | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.server.containerPorts.http }}
+ {{- if not .Values.diagnosticMode.enabled }}
+ {{- if .Values.server.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.server.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /-/healthy
+ port: http
+ {{- end }}
+ {{- if .Values.server.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.server.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /-/ready
+ port: http
+ {{- end }}
+ {{- if .Values.server.customStartupProbe }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.customStartupProbe "context" $) | nindent 12 }}
+ {{- else if .Values.server.startupProbe.enabled }}
+ startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.startupProbe "enabled") "context" $) | nindent 12 }}
+ tcpSocket:
+ port: http
+ {{- end }}
+ {{- end }}
+ {{- if .Values.server.lifecycleHooks }}
+ lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.server.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: config
+ mountPath: /opt/bitnami/prometheus/conf
+ readOnly: true
+ - name: data
+ mountPath: {{ .Values.server.persistence.mountPath }}
+ {{- if .Values.server.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.server.thanos.create }}
+ - name: thanos-sidecar
+ image: {{ template "prometheus.server.thanosImage" . }}
+ imagePullPolicy: {{ .Values.server.thanos.image.pullPolicy }}
+ args:
+ - sidecar
+ - --prometheus.url={{ default "http://localhost:9090" .Values.server.thanos.prometheusUrl }}
+ - --grpc-address=0.0.0.0:10901
+ - --http-address=0.0.0.0:10902
+ - --tsdb.path=/prometheus/
+ {{- if .Values.server.thanos.objectStorageConfig.secretName }}
+ - --objstore.config=$(OBJSTORE_CONFIG)
+ {{- end }}
+ {{- if .Values.server.thanos.extraArgs }}
+ {{ toYaml .Values.server.thanos.extraArgs | indent 12 | trim }}
+ {{- end }}
+ {{- if .Values.server.thanos.objectStorageConfig.secretName }}
+ env:
+ - name: OBJSTORE_CONFIG
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.server.thanos.objectStorageConfig.secretName }}
+ key: {{ .Values.server.thanos.objectStorageConfig.secretKey | default "thanos.yaml" }}
+ {{- end }}
+ {{- if .Values.server.thanos.resources }}
+ resources: {{- toYaml .Values.server.thanos.resources | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: grpc
+ containerPort: 10901
+ protocol: TCP
+ - name: http
+ containerPort: 10902
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /prometheus
+ name: data
+ {{- if .Values.server.thanos.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.server.thanos.containerSecurityContext.enabled }}
+ securityContext: {{- omit .Values.server.thanos.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if .Values.server.thanos.customLivenessProbe }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.customLivenessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.server.thanos.livenessProbe.enabled }}
+ livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.thanos.livenessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /-/healthy
+ port: http
+ scheme: HTTP
+ {{- end }}
+ {{- if .Values.server.thanos.customReadinessProbe }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.customReadinessProbe "context" $) | nindent 12 }}
+ {{- else if .Values.server.thanos.readinessProbe.enabled }}
+ readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.thanos.readinessProbe "enabled") "context" $) | nindent 12 }}
+ httpGet:
+ path: /-/ready
+ port: http
+ scheme: HTTP
+ {{- end }}
+ {{- end }}
+ {{- if .Values.server.sidecars }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.server.sidecars "context" $) | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ include "prometheus.server.configmapName" . }}
+ - name: data
+ {{- if .Values.server.persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ default (include "prometheus.server.fullname" .) .Values.server.persistence.existingClaim }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.server.extraVolumes }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.extraVolumes "context" $) | nindent 8 }}
+ {{- end }}
diff --git a/bitnami/prometheus/templates/server/ingress.yaml b/bitnami/prometheus/templates/server/ingress.yaml
new file mode 100644
index 0000000000..b09f821055
--- /dev/null
+++ b/bitnami/prometheus/templates/server/ingress.yaml
@@ -0,0 +1,64 @@
+{{- if .Values.server.ingress.enabled }}
+apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+ name: {{ template "prometheus.server.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.server.ingress.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.server.ingress.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.server.ingress.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ {{- if and .Values.server.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
+ ingressClassName: {{ .Values.server.ingress.ingressClassName | quote }}
+ {{- end }}
+ rules:
+ {{- if .Values.server.ingress.hostname }}
+ - host: {{ .Values.server.ingress.hostname }}
+ http:
+ paths:
+ {{- if .Values.server.ingress.extraPaths }}
+ {{- toYaml .Values.server.ingress.extraPaths | nindent 10 }}
+ {{- end }}
+ - path: {{ .Values.server.ingress.path }}
+ {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
+ pathType: {{ .Values.server.ingress.pathType }}
+ {{- end }}
+ backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "http" "context" $) | nindent 14 }}
+ {{- end }}
+ {{- range .Values.server.ingress.extraHosts }}
+ - host: {{ .name | quote }}
+ http:
+ paths:
+ - path: {{ default "/" .path }}
+ {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
+ pathType: {{ default "ImplementationSpecific" .pathType }}
+ {{- end }}
+ backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.server.ingress.extraRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.ingress.extraRules "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if or (and .Values.server.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.ingress.annotations )) .Values.server.ingress.selfSigned)) .Values.server.ingress.extraTls }}
+ tls:
+ {{- if and .Values.server.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.ingress.annotations )) .Values.server.ingress.selfSigned) }}
+ - hosts:
+ - {{ .Values.server.ingress.hostname | quote }}
+ secretName: {{ printf "%s-tls" .Values.server.ingress.hostname }}
+ {{- end }}
+ {{- if .Values.server.ingress.extraTls }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.ingress.extraTls "context" $) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/server/pdb.yaml b/bitnami/prometheus/templates/server/pdb.yaml
new file mode 100644
index 0000000000..c403d7c946
--- /dev/null
+++ b/bitnami/prometheus/templates/server/pdb.yaml
@@ -0,0 +1,28 @@
+{{- $replicaCount := int .Values.server.replicaCount }}
+{{- if and .Values.server.pdb.create (gt $replicaCount 1) }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "prometheus.server.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.server.pdb.minAvailable }}
+ minAvailable: {{ .Values.server.pdb.minAvailable }}
+ {{- end }}
+ {{- if .Values.server.pdb.maxUnavailable }}
+ maxUnavailable: {{ .Values.server.pdb.maxUnavailable }}
+ {{- end }}
+ selector:
+ matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+{{- end }}
diff --git a/bitnami/prometheus/templates/server/pvc.yaml b/bitnami/prometheus/templates/server/pvc.yaml
new file mode 100644
index 0000000000..d3aac8f1c2
--- /dev/null
+++ b/bitnami/prometheus/templates/server/pvc.yaml
@@ -0,0 +1,37 @@
+{{- if and .Values.server.persistence.enabled (not .Values.server.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ include "prometheus.server.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.server.persistence.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.server.persistence.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.server.persistence.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ accessModes:
+ {{- range .Values.server.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.server.persistence.size | quote }}
+ {{- if .Values.server.persistence.selector }}
+ selector: {{- include "common.tplvalues.render" (dict "value" .Values.server.persistence.selector "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.server.persistence.dataSource }}
+ dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.server.persistence.dataSource "context" $) | nindent 4 }}
+ {{- end }}
+ {{- include "common.storage.class" (dict "persistence" .Values.server.persistence "global" .Values.global) | nindent 2 }}
+{{- end -}}
diff --git a/bitnami/prometheus/templates/server/service-account.yaml b/bitnami/prometheus/templates/server/service-account.yaml
new file mode 100644
index 0000000000..847ac63915
--- /dev/null
+++ b/bitnami/prometheus/templates/server/service-account.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.server.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "prometheus.server.serviceAccountName" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.server.serviceAccount.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.server.serviceAccount.annotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.serviceAccount.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.server.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/bitnami/prometheus/templates/server/service.yaml b/bitnami/prometheus/templates/server/service.yaml
new file mode 100644
index 0000000000..89c01539cd
--- /dev/null
+++ b/bitnami/prometheus/templates/server/service.yaml
@@ -0,0 +1,56 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "prometheus.server.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.server.service.annotations .Values.commonAnnotations }}
+ annotations:
+ {{- if .Values.server.service.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.server.service.annotations "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.server.service.type }}
+ {{- if and .Values.server.service.clusterIP (eq .Values.server.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.server.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.server.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.server.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.server.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.server.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if or (eq .Values.server.service.type "LoadBalancer") (eq .Values.server.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.server.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ {{- if and (eq .Values.server.service.type "LoadBalancer") (not (empty .Values.server.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{ .Values.server.service.loadBalancerSourceRanges }}
+ {{- end }}
+ {{- if and (eq .Values.server.service.type "LoadBalancer") (not (empty .Values.server.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.server.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+ - name: http
+ port: {{ .Values.server.service.ports.http }}
+ targetPort: http
+ protocol: TCP
+ {{- if and (or (eq .Values.server.service.type "NodePort") (eq .Values.server.service.type "LoadBalancer")) (not (empty .Values.server.service.nodePorts.http)) }}
+ nodePort: {{ .Values.server.service.nodePorts.http }}
+ {{- else if eq .Values.server.service.type "ClusterIP" }}
+ nodePort: null
+ {{- end }}
+ {{- if .Values.server.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
diff --git a/bitnami/prometheus/templates/server/thanos-ingress.yaml b/bitnami/prometheus/templates/server/thanos-ingress.yaml
new file mode 100644
index 0000000000..91bc6ea1f6
--- /dev/null
+++ b/bitnami/prometheus/templates/server/thanos-ingress.yaml
@@ -0,0 +1,63 @@
+{{- if and .Values.server.thanos.create .Values.server.thanos.ingress.enabled }}
+apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+ name: {{ template "prometheus.thanos-sidecar.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ app.kubernetes.io/subcomponent: thanos
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.server.thanos.ingress.annotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.server.thanos.ingress.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if and .Values.server.thanos.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
+ ingressClassName: {{ .Values.server.thanos.ingress.ingressClassName | quote }}
+ {{- end }}
+ rules:
+ {{- if .Values.server.thanos.ingress.hostname }}
+ - host: {{ .Values.server.thanos.ingress.hostname }}
+ http:
+ paths:
+ {{- if .Values.server.thanos.ingress.extraPaths }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.ingress.extraPaths "context" $) | nindent 10 }}
+ {{- end }}
+ - path: {{ .Values.server.thanos.ingress.path }}
+ {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
+ pathType: {{ .Values.server.thanos.ingress.pathType }}
+ {{- end }}
+ backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.thanos-sidecar.fullname" $) "servicePort" "grpc" "context" $) | nindent 14 }}
+ {{- end }}
+ {{- range .Values.server.thanos.ingress.extraHosts }}
+ - host: {{ .name | quote }}
+ http:
+ paths:
+ - path: {{ default "/" .path }}
+ {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
+ pathType: {{ default "ImplementationSpecific" .pathType }}
+ {{- end }}
+ backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.thanos-sidecar.fullname" $) "servicePort" "grpc" "context" $) | nindent 14 }}
+ {{- end }}
+ {{- if .Values.server.thanos.ingress.extraRules }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.ingress.extraRules "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if or (and .Values.server.thanos.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.thanos.ingress.annotations )) .Values.server.thanos.ingress.selfSigned)) .Values.server.thanos.ingress.extraTls }}
+ tls:
+ {{- if and .Values.server.thanos.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.thanos.ingress.annotations )) .Values.server.thanos.ingress.selfSigned) }}
+ - hosts:
+ - {{ .Values.server.thanos.ingress.hostname | quote }}
+ secretName: {{ printf "%s-tls" .Values.server.thanos.ingress.hostname }}
+ {{- end }}
+ {{- if .Values.server.thanos.ingress.extraTls }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.ingress.extraTls "context" $) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
diff --git a/bitnami/prometheus/templates/server/thanos-service.yaml b/bitnami/prometheus/templates/server/thanos-service.yaml
new file mode 100644
index 0000000000..b9a4d7c845
--- /dev/null
+++ b/bitnami/prometheus/templates/server/thanos-service.yaml
@@ -0,0 +1,55 @@
+{{- if .Values.server.thanos.create }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "prometheus.thanos-sidecar.fullname" . }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ app.kubernetes.io/subcomponent: thanos
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- if .Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.server.thanos.service.annotations }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.service.annotations "context" $) | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.server.thanos.service.type }}
+ {{- if and (eq .Values.server.thanos.service.type "LoadBalancer") (not (empty .Values.server.thanos.service.loadBalancerIP)) }}
+ loadBalancerIP: {{ .Values.server.thanos.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.server.thanos.service.type "LoadBalancer") (not (empty .Values.server.thanos.service.loadBalancerSourceRanges)) }}
+ loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.service.loadBalancerSourceRanges "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if and .Values.server.thanos.service.clusterIP (eq .Values.server.thanos.service.type "ClusterIP") }}
+ clusterIP: {{ .Values.server.thanos.service.clusterIP }}
+ {{- end }}
+ {{- if .Values.server.thanos.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.server.thanos.service.sessionAffinity }}
+ {{- end }}
+ {{- if .Values.server.thanos.service.sessionAffinityConfig }}
+ sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.service.sessionAffinityConfig "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if or (eq .Values.server.thanos.service.type "LoadBalancer") (eq .Values.server.thanos.service.type "NodePort") }}
+ externalTrafficPolicy: {{ .Values.server.thanos.service.externalTrafficPolicy | quote }}
+ {{- end }}
+ ports:
+ - name: grpc
+ port: {{ .Values.server.thanos.service.ports.grpc }}
+ targetPort: grpc
+ protocol: TCP
+ {{- if and .Values.server.thanos.service.nodePorts.grpc (or (eq .Values.server.thanos.service.type "NodePort") (eq .Values.server.thanos.service.type "LoadBalancer")) }}
+ nodePort: {{ .Values.server.thanos.service.nodePorts.grpc }}
+ {{- end }}
+ {{- if .Values.server.thanos.service.extraPorts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.service.extraPorts "context" $) | nindent 4 }}
+ {{- end }}
+ selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+{{- end }}
diff --git a/bitnami/prometheus/templates/tls-secret.yaml b/bitnami/prometheus/templates/tls-secret.yaml
new file mode 100644
index 0000000000..6bf01bbcea
--- /dev/null
+++ b/bitnami/prometheus/templates/tls-secret.yaml
@@ -0,0 +1,151 @@
+{{- if or .Values.server.ingress.enabled
+ (and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled)
+ (and .Values.server.thanos.create .Values.server.thanos.ingress.enabled) }}
+{{- if .Values.server.ingress.secrets }}
+{{- range .Values.server.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .name }}
+ namespace: {{ include "common.names.namespace" $ | quote }}
+ labels: {{- include "common.labels.standard" $ | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .certificate | b64enc }}
+ tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
+{{- if .Values.alertmanager.ingress.secrets }}
+{{- range .Values.alertmanager.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .name }}
+ namespace: {{ include "common.names.namespace" $ | quote }}
+ labels: {{- include "common.labels.standard" $ | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .certificate | b64enc }}
+ tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
+{{- if .Values.server.thanos.ingress.secrets }}
+{{- range .Values.server.thanos.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .name }}
+ namespace: {{ include "common.names.namespace" $ | quote }}
+ labels: {{- include "common.labels.standard" $ | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ app.kubernetes.io/subcomponent: thanos
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .certificate | b64enc }}
+ tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
+{{- if (or (and .Values.server.ingress.tls .Values.server.ingress.selfSigned)
+ (and .Values.alertmanager.ingress.tls .Values.alertmanager.ingress.selfSigned)
+ (and .Values.server.thanos.ingress.tls .Values.server.thanos.ingress.selfSigned)) }}
+{{- $ca := genCA "prometheus-ca" 365 }}
+{{- if and .Values.server.ingress.tls .Values.server.ingress.selfSigned }}
+{{- $secretName := printf "%s-tls" .Values.server.ingress.hostname }}
+{{- $cert := genSignedCert .Values.server.ingress.hostname nil (list .Values.server.ingress.hostname) 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secretName }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+ tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+ ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+---
+{{- end }}
+{{- if and .Values.alertmanager.ingress.tls .Values.alertmanager.ingress.selfSigned }}
+{{- $secretName := printf "%s-tls" .Values.alertmanager.ingress.hostname }}
+{{- $cert := genSignedCert .Values.alertmanager.ingress.hostname nil (list .Values.alertmanager.ingress.hostname) 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secretName }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: alertmanager
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+ tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+ ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+---
+{{- end }}
+{{- if and .Values.server.thanos.ingress.tls .Values.server.thanos.ingress.selfSigned }}
+{{- $secretName := printf "%s-tls" .Values.server.thanos.ingress.hostname }}
+{{- $cert := genSignedCert .Values.server.thanos.ingress.hostname nil (list .Values.server.thanos.ingress.hostname) 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secretName }}
+ namespace: {{ include "common.names.namespace" . | quote }}
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ app.kubernetes.io/part-of: prometheus
+ app.kubernetes.io/component: server
+ app.kubernetes.io/subcomponent: thanos
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+ tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+ ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/bitnami/prometheus/values.schema.json b/bitnami/prometheus/values.schema.json
new file mode 100644
index 0000000000..c59b1eae98
--- /dev/null
+++ b/bitnami/prometheus/values.schema.json
@@ -0,0 +1,1954 @@
+{
+ "title": "Chart Values",
+ "type": "object",
+ "properties": {
+ "global": {
+ "type": "object",
+ "properties": {
+ "imageRegistry": {
+ "type": "string",
+ "description": "Global Docker image registry",
+ "default": ""
+ },
+ "imagePullSecrets": {
+ "type": "array",
+ "description": "Global Docker registry secret names as an array",
+ "default": [],
+ "items": {}
+ },
+ "storageClass": {
+ "type": "string",
+ "description": "Global StorageClass for Persistent Volume(s)",
+ "default": ""
+ }
+ }
+ },
+ "kubeVersion": {
+ "type": "string",
+ "description": "Override Kubernetes version",
+ "default": ""
+ },
+ "nameOverride": {
+ "type": "string",
+ "description": "String to partially override common.names.name",
+ "default": ""
+ },
+ "fullnameOverride": {
+ "type": "string",
+ "description": "String to fully override common.names.fullname",
+ "default": ""
+ },
+ "namespaceOverride": {
+ "type": "string",
+ "description": "String to fully override common.names.namespace",
+ "default": ""
+ },
+ "commonLabels": {
+ "type": "object",
+ "description": "Labels to add to all deployed objects",
+ "default": {}
+ },
+ "commonAnnotations": {
+ "type": "object",
+ "description": "Annotations to add to all deployed objects",
+ "default": {}
+ },
+ "clusterDomain": {
+ "type": "string",
+ "description": "Kubernetes cluster domain name",
+ "default": "cluster.local"
+ },
+ "extraDeploy": {
+ "type": "array",
+ "description": "Array of extra objects to deploy with the release",
+ "default": [],
+ "items": {}
+ },
+ "diagnosticMode": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable diagnostic mode (all probes will be disabled and the command will be overridden)",
+ "default": false
+ },
+ "command": {
+ "type": "array",
+ "description": "Command to override all containers in the deployment",
+ "default": [
+ "sleep"
+ ],
+ "items": {
+ "type": "string"
+ }
+ },
+ "args": {
+ "type": "array",
+ "description": "Args to override all containers in the deployment",
+ "default": [
+ "infinity"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "ingress": {
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "type": "string",
+ "description": "Force Ingress API version (automatically detected if not set)",
+ "default": ""
+ }
+ }
+ },
+ "alertmanager": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Alertmanager enabled",
+ "default": true
+ },
+ "image": {
+ "type": "object",
+ "properties": {
+ "registry": {
+ "type": "string",
+ "description": "Alertmanager image registry",
+ "default": "docker.io"
+ },
+ "repository": {
+ "type": "string",
+ "description": "Alertmanager image repository",
+ "default": "bitnami/alertmanager"
+ },
+ "tag": {
+ "type": "string",
+ "description": "Alertmanager image tag (immutable tags are recommended)",
+ "default": "0.25.0-debian-11-r48"
+ },
+ "digest": {
+ "type": "string",
+ "description": "Alertmanager image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended)",
+ "default": ""
+ },
+ "pullPolicy": {
+ "type": "string",
+ "description": "Alertmanager image pull policy",
+ "default": "IfNotPresent"
+ },
+ "pullSecrets": {
+ "type": "array",
+ "description": "Alertmanager image pull secrets",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "configuration": {
+ "type": "string",
+ "description": "Alertmanager configuration. This content will be stored in the the alertmanager.yaml file and the content can be a template.",
+ "default": "receivers:\n - name: default-receiver\nroute:\n group_wait: 10s\n group_interval: 5m\n receiver: default-receiver\n repeat_interval: 3h\n"
+ },
+ "replicaCount": {
+ "type": "number",
+ "description": "Number of Alertmanager replicas to deploy",
+ "default": 1
+ },
+ "containerPorts": {
+ "type": "object",
+ "properties": {
+ "http": {
+ "type": "number",
+ "description": "Alertmanager HTTP container port",
+ "default": 9093
+ },
+ "cluster": {
+ "type": "number",
+ "description": "Alertmanager Cluster HA port",
+ "default": 9094
+ }
+ }
+ },
+ "livenessProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable livenessProbe on Alertmanager containers",
+ "default": true
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Initial delay seconds for livenessProbe",
+ "default": 5
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "Period seconds for livenessProbe",
+ "default": 20
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "Timeout seconds for livenessProbe",
+ "default": 3
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Failure threshold for livenessProbe",
+ "default": 3
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Success threshold for livenessProbe",
+ "default": 1
+ }
+ }
+ },
+ "readinessProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable readinessProbe on Alertmanager containers",
+ "default": true
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Initial delay seconds for readinessProbe",
+ "default": 5
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "Period seconds for readinessProbe",
+ "default": 10
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "Timeout seconds for readinessProbe",
+ "default": 2
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Failure threshold for readinessProbe",
+ "default": 5
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Success threshold for readinessProbe",
+ "default": 1
+ }
+ }
+ },
+ "startupProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable startupProbe on Alertmanager containers",
+ "default": false
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Initial delay seconds for startupProbe",
+ "default": 2
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "Period seconds for startupProbe",
+ "default": 5
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "Timeout seconds for startupProbe",
+ "default": 2
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Failure threshold for startupProbe",
+ "default": 10
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Success threshold for startupProbe",
+ "default": 1
+ }
+ }
+ },
+ "customLivenessProbe": {
+ "type": "object",
+ "description": "Custom livenessProbe that overrides the default one",
+ "default": {}
+ },
+ "customReadinessProbe": {
+ "type": "object",
+ "description": "Custom readinessProbe that overrides the default one",
+ "default": {}
+ },
+ "customStartupProbe": {
+ "type": "object",
+ "description": "Custom startupProbe that overrides the default one",
+ "default": {}
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {
+ "type": "object",
+ "description": "The resources limits for the Alertmanager containers",
+ "default": {}
+ },
+ "requests": {
+ "type": "object",
+ "description": "The requested resources for the Alertmanager containers",
+ "default": {}
+ }
+ }
+ },
+ "podSecurityContext": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enabled Alertmanager pods' Security Context",
+ "default": true
+ },
+ "fsGroup": {
+ "type": "number",
+ "description": "Set Alertmanager pod's Security Context fsGroup",
+ "default": 1001
+ }
+ }
+ },
+ "containerSecurityContext": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enabled Alertmanager containers' Security Context",
+ "default": true
+ },
+ "runAsUser": {
+ "type": "number",
+ "description": "Set Alertmanager containers' Security Context runAsUser",
+ "default": 1001
+ },
+ "runAsNonRoot": {
+ "type": "boolean",
+ "description": "Set Alertmanager containers' Security Context runAsNonRoot",
+ "default": true
+ },
+ "readOnlyRootFilesystem": {
+ "type": "boolean",
+ "description": "Set Alertmanager containers' Security Context readOnlyRootFilesystem",
+ "default": false
+ }
+ }
+ },
+ "existingConfigmap": {
+ "type": "string",
+ "description": "The name of an existing ConfigMap with your custom configuration for Alertmanager",
+ "default": ""
+ },
+ "existingConfigmapKey": {
+ "type": "string",
+ "description": "The name of the key with the Alertmanager config file",
+ "default": ""
+ },
+ "command": {
+ "type": "array",
+ "description": "Override default container command (useful when using custom images)",
+ "default": [],
+ "items": {}
+ },
+ "args": {
+ "type": "array",
+ "description": "Override default container args (useful when using custom images)",
+ "default": [],
+ "items": {}
+ },
+ "extraArgs": {
+ "type": "array",
+ "description": "Additional arguments passed to the Alertmanager container",
+ "default": [],
+ "items": {}
+ },
+ "hostAliases": {
+ "type": "array",
+ "description": "Alertmanager pods host aliases",
+ "default": [],
+ "items": {}
+ },
+ "podLabels": {
+ "type": "object",
+ "description": "Extra labels for Alertmanager pods",
+ "default": {}
+ },
+ "podAnnotations": {
+ "type": "object",
+ "description": "Annotations for Alertmanager pods",
+ "default": {}
+ },
+ "podAffinityPreset": {
+ "type": "string",
+ "description": "Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`",
+ "default": ""
+ },
+ "podAntiAffinityPreset": {
+ "type": "string",
+ "description": "Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`",
+ "default": "soft"
+ },
+ "pdb": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean",
+ "description": "Enable/disable a Pod Disruption Budget creation",
+ "default": false
+ },
+ "minAvailable": {
+ "type": "number",
+ "description": "Minimum number/percentage of pods that should remain scheduled",
+ "default": 1
+ },
+ "maxUnavailable": {
+ "type": "string",
+ "description": "Maximum number/percentage of pods that may be made unavailable",
+ "default": ""
+ }
+ }
+ },
+ "nodeAffinityPreset": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`",
+ "default": ""
+ },
+ "key": {
+ "type": "string",
+ "description": "Node label key to match. Ignored if `affinity` is set",
+ "default": ""
+ },
+ "values": {
+ "type": "array",
+ "description": "Node label values to match. Ignored if `affinity` is set",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "affinity": {
+ "type": "object",
+ "description": "Affinity for Alertmanager pods assignment",
+ "default": {}
+ },
+ "nodeSelector": {
+ "type": "object",
+ "description": "Node labels for Alertmanager pods assignment",
+ "default": {}
+ },
+ "tolerations": {
+ "type": "array",
+ "description": "Tolerations for Alertmanager pods assignment",
+ "default": [],
+ "items": {}
+ },
+ "updateStrategy": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Alertmanager statefulset strategy type",
+ "default": "RollingUpdate"
+ }
+ }
+ },
+ "podManagementPolicy": {
+ "type": "string",
+ "description": "Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join",
+ "default": "OrderedReady"
+ },
+ "priorityClassName": {
+ "type": "string",
+ "description": "Alertmanager pods' priorityClassName",
+ "default": ""
+ },
+ "topologySpreadConstraints": {
+ "type": "array",
+ "description": "Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template",
+ "default": [],
+ "items": {}
+ },
+ "schedulerName": {
+ "type": "string",
+ "description": "Name of the k8s scheduler (other than default) for Alertmanager pods",
+ "default": ""
+ },
+ "terminationGracePeriodSeconds": {
+ "type": "string",
+ "description": "Seconds Alertmanager pod needs to terminate gracefully",
+ "default": ""
+ },
+ "lifecycleHooks": {
+ "type": "object",
+ "description": "lifecycleHooks for the Alertmanager container(s) to automate configuration before or after startup",
+ "default": {}
+ },
+ "extraEnvVars": {
+ "type": "array",
+ "description": "Array with extra environment variables to add to Alertmanager nodes",
+ "default": [],
+ "items": {}
+ },
+ "extraEnvVarsCM": {
+ "type": "string",
+ "description": "Name of existing ConfigMap containing extra env vars for Alertmanager nodes",
+ "default": ""
+ },
+ "extraEnvVarsSecret": {
+ "type": "string",
+ "description": "Name of existing Secret containing extra env vars for Alertmanager nodes",
+ "default": ""
+ },
+ "extraVolumes": {
+ "type": "array",
+ "description": "Optionally specify extra list of additional volumes for the Alertmanager pod(s)",
+ "default": [],
+ "items": {}
+ },
+ "extraVolumeMounts": {
+ "type": "array",
+ "description": "Optionally specify extra list of additional volumeMounts for the Alertmanager container(s)",
+ "default": [],
+ "items": {}
+ },
+ "sidecars": {
+ "type": "array",
+ "description": "Add additional sidecar containers to the Alertmanager pod(s)",
+ "default": [],
+ "items": {}
+ },
+ "initContainers": {
+ "type": "array",
+ "description": "Add additional init containers to the Alertmanager pod(s)",
+ "default": [],
+ "items": {}
+ },
+ "ingress": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable ingress record generation for Alertmanager",
+ "default": false
+ },
+ "pathType": {
+ "type": "string",
+ "description": "Ingress path type",
+ "default": "ImplementationSpecific"
+ },
+ "hostname": {
+ "type": "string",
+ "description": "Default host for the ingress record",
+ "default": "alertmanager.prometheus.local"
+ },
+ "ingressClassName": {
+ "type": "string",
+ "description": "IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)",
+ "default": ""
+ },
+ "path": {
+ "type": "string",
+ "description": "Default path for the ingress record",
+ "default": "/"
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.",
+ "default": {}
+ },
+ "tls": {
+ "type": "boolean",
+ "description": "Enable TLS configuration for the host defined at `ingress.hostname` parameter",
+ "default": false
+ },
+ "selfSigned": {
+ "type": "boolean",
+ "description": "Create a TLS secret for this ingress record using self-signed certificates generated by Helm",
+ "default": false
+ },
+ "extraHosts": {
+ "type": "array",
+ "description": "An array with additional hostname(s) to be covered with the ingress record",
+ "default": [],
+ "items": {}
+ },
+ "extraPaths": {
+ "type": "array",
+ "description": "An array with additional arbitrary paths that may need to be added to the ingress under the main host",
+ "default": [],
+ "items": {}
+ },
+ "extraTls": {
+ "type": "array",
+ "description": "TLS configuration for additional hostname(s) to be covered with this ingress record",
+ "default": [],
+ "items": {}
+ },
+ "secrets": {
+ "type": "array",
+ "description": "Custom TLS certificates as secrets",
+ "default": [],
+ "items": {}
+ },
+ "extraRules": {
+ "type": "array",
+ "description": "Additional rules to be covered with this ingress record",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "serviceAccount": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean",
+ "description": "Specifies whether a ServiceAccount should be created",
+ "default": true
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the ServiceAccount to use.",
+ "default": ""
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional Service Account annotations (evaluated as a template)",
+ "default": {}
+ },
+ "automountServiceAccountToken": {
+ "type": "boolean",
+ "description": "Automount service account token for the server service account",
+ "default": true
+ }
+ }
+ },
+ "service": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Alertmanager service type",
+ "default": "LoadBalancer"
+ },
+ "ports": {
+ "type": "object",
+ "properties": {
+ "http": {
+ "type": "number",
+ "description": "Alertmanager service HTTP port",
+ "default": 80
+ },
+ "cluster": {
+ "type": "number",
+ "description": "Alertmanager cluster HA port",
+ "default": 9094
+ }
+ }
+ },
+ "nodePorts": {
+ "type": "object",
+ "properties": {
+ "http": {
+ "type": "string",
+ "description": "Node port for HTTP",
+ "default": ""
+ }
+ }
+ },
+ "clusterIP": {
+ "type": "string",
+ "description": "Alertmanager service Cluster IP",
+ "default": ""
+ },
+ "loadBalancerIP": {
+ "type": "string",
+ "description": "Alertmanager service Load Balancer IP",
+ "default": ""
+ },
+ "loadBalancerSourceRanges": {
+ "type": "array",
+ "description": "Alertmanager service Load Balancer sources",
+ "default": [],
+ "items": {}
+ },
+ "externalTrafficPolicy": {
+ "type": "string",
+ "description": "Alertmanager service external traffic policy",
+ "default": "Cluster"
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional custom annotations for Alertmanager service",
+ "default": {}
+ },
+ "extraPorts": {
+ "type": "array",
+ "description": "Extra ports to expose in Alertmanager service (normally used with the `sidecars` value)",
+ "default": [],
+ "items": {}
+ },
+ "sessionAffinity": {
+ "type": "string",
+ "description": "Control where client requests go, to the same pod or round-robin",
+ "default": "None"
+ },
+ "sessionAffinityConfig": {
+ "type": "object",
+ "description": "Additional settings for the sessionAffinity",
+ "default": {}
+ }
+ }
+ },
+ "persistence": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable Alertmanager data persistence using VolumeClaimTemplates",
+ "default": false
+ },
+ "mountPath": {
+ "type": "string",
+ "description": "Path to mount the volume at.",
+ "default": "/bitnami/alertmanager/data"
+ },
+ "subPath": {
+ "type": "string",
+ "description": "The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services",
+ "default": ""
+ },
+ "storageClass": {
+ "type": "string",
+ "description": "PVC Storage Class for Alertmanager data volume",
+ "default": ""
+ },
+ "accessModes": {
+ "type": "array",
+ "description": "PVC Access Mode for Alertmanager data volume",
+ "default": [
+ "ReadWriteOnce"
+ ],
+ "items": {
+ "type": "string"
+ }
+ },
+ "size": {
+ "type": "string",
+ "description": "PVC Storage Request for Alertmanager data volume",
+ "default": "8Gi"
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Annotations for the PVC",
+ "default": {}
+ },
+ "selector": {
+ "type": "object",
+ "description": "Selector to match an existing Persistent Volume (this value is evaluated as a template)",
+ "default": {}
+ }
+ }
+ }
+ }
+ },
+ "server": {
+ "type": "object",
+ "properties": {
+ "image": {
+ "type": "object",
+ "properties": {
+ "registry": {
+ "type": "string",
+ "description": "Prometheus image registry",
+ "default": "docker.io"
+ },
+ "repository": {
+ "type": "string",
+ "description": "Prometheus image repository",
+ "default": "bitnami/prometheus"
+ },
+ "tag": {
+ "type": "string",
+ "description": "Prometheus image tag (immutable tags are recommended)",
+ "default": "2.44.0-debian-11-r0"
+ },
+ "digest": {
+ "type": "string",
+ "description": "Prometheus image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended)",
+ "default": ""
+ },
+ "pullPolicy": {
+ "type": "string",
+ "description": "Prometheus image pull policy",
+ "default": "IfNotPresent"
+ },
+ "pullSecrets": {
+ "type": "array",
+ "description": "Prometheus image pull secrets",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "configuration": {
+ "type": "string",
+ "description": "Prometheus configuration. This content will be stored in the prometheus.yaml file and the content can be a template.",
+ "default": "global:\n {{- if .Values.server.scrapeInterval }}\n scrape_interval: {{ .Values.server.scrapeInterval }}\n {{- end }}\n {{- if .Values.server.scrapeTimeout }}\n scrape_timeout: {{ .Values.server.scrapeTimeout }}\n {{- end }}\n {{- if .Values.server.evaluationInterval }}\n evaluation_interval: {{ .Values.server.evaluationInterval }}\n {{- end }}\n external_labels:\n monitor: {{ template \"common.names.fullname\" . }}\n {{- if .Values.server.externalLabels }}\n {{- include \"common.tplvalues.render\" (dict \"value\" .Values.server.externalLabels \"context\" $) | nindent 4 }}\n {{- end }}\n{{- if .Values.server.remoteWrite }}\nremote_write: {{- include \"common.tplvalues.render\" (dict \"value\" .Values.server.remoteWrite \"context\" $) | nindent 4 }}\n{{- end }}\nscrape_configs:\n - job_name: prometheus\n {{- include \"prometheus.scrape_config\" (dict \"component\" \"server\" \"context\" $) | nindent 4 }}\n{{- if .Values.alertmanager.enabled }}\n - job_name: alertmanager\n {{- include \"prometheus.scrape_config\" (dict \"component\" \"alertmanager\" \"context\" $) | nindent 4 }}\n{{- end }}\n{{- if .Values.server.extraScrapeConfigs}}\n{{- include \"common.tplvalues.render\" (dict \"value\" .Values.server.extraScrapeConfigs \"context\" $) | nindent 2 }}\n{{- end }}\n{{- if or .Values.alertmanager.enabled .Values.server.alertingEndpoints}}\nalerting:\n alertmanagers:\n {{- if .Values.server.alertingEndpoints }}\n {{- include \"common.tplvalues.render\" (dict \"value\" .Values.server.alertingEndpoints \"context\" $) | nindent 4 }}\n {{- end }}\n - scheme: HTTP\n static_configs:\n - targets: [ {{ printf \"%s:%d\" (include \"prometheus.alertmanager.fullname\" .) (int .Values.alertmanager.service.ports.http) }}]\nrule_files:\n - rules.yaml\n{{- end }}\n"
+ },
+ "alertingRules": {
+ "type": "object",
+ "description": "Prometheus alerting rules. This content will be stored in the rules.yaml file and the content can be a template.",
+ "default": {}
+ },
+ "extraScrapeConfigs": {
+ "type": "array",
+ "description": "Prometheus configuration, useful to declare new scrape_configs. This content will be merged with the 'server.configuration' value and stored in the prometheus.yaml file.",
+ "default": [],
+ "items": {}
+ },
+ "replicaCount": {
+ "type": "number",
+ "description": "Number of Prometheus replicas to deploy",
+ "default": 1
+ },
+ "containerPorts": {
+ "type": "object",
+ "properties": {
+ "http": {
+ "type": "number",
+ "description": "Prometheus HTTP container port",
+ "default": 9090
+ }
+ }
+ },
+ "livenessProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable livenessProbe on Prometheus containers",
+ "default": true
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Initial delay seconds for livenessProbe",
+ "default": 5
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "Period seconds for livenessProbe",
+ "default": 20
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "Timeout seconds for livenessProbe",
+ "default": 3
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Failure threshold for livenessProbe",
+ "default": 3
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Success threshold for livenessProbe",
+ "default": 1
+ }
+ }
+ },
+ "readinessProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable readinessProbe on Prometheus containers",
+ "default": true
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Initial delay seconds for readinessProbe",
+ "default": 5
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "Period seconds for readinessProbe",
+ "default": 10
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "Timeout seconds for readinessProbe",
+ "default": 2
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Failure threshold for readinessProbe",
+ "default": 5
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Success threshold for readinessProbe",
+ "default": 1
+ }
+ }
+ },
+ "startupProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable startupProbe on Prometheus containers",
+ "default": false
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Initial delay seconds for startupProbe",
+ "default": 2
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "Period seconds for startupProbe",
+ "default": 5
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "Timeout seconds for startupProbe",
+ "default": 2
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Failure threshold for startupProbe",
+ "default": 10
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Success threshold for startupProbe",
+ "default": 1
+ }
+ }
+ },
+ "customLivenessProbe": {
+ "type": "object",
+ "description": "Custom livenessProbe that overrides the default one",
+ "default": {}
+ },
+ "customReadinessProbe": {
+ "type": "object",
+ "description": "Custom readinessProbe that overrides the default one",
+ "default": {}
+ },
+ "customStartupProbe": {
+ "type": "object",
+ "description": "Custom startupProbe that overrides the default one",
+ "default": {}
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {
+ "type": "object",
+ "description": "The resources limits for the Prometheus containers",
+ "default": {}
+ },
+ "requests": {
+ "type": "object",
+ "description": "The requested resources for the Prometheus containers",
+ "default": {}
+ }
+ }
+ },
+ "podSecurityContext": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enabled Prometheus pods' Security Context",
+ "default": true
+ },
+ "fsGroup": {
+ "type": "number",
+ "description": "Set Prometheus pod's Security Context fsGroup",
+ "default": 1001
+ }
+ }
+ },
+ "containerSecurityContext": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enabled Prometheus containers' Security Context",
+ "default": true
+ },
+ "runAsUser": {
+ "type": "number",
+ "description": "Set Prometheus containers' Security Context runAsUser",
+ "default": 1001
+ },
+ "runAsNonRoot": {
+ "type": "boolean",
+ "description": "Set Prometheus containers' Security Context runAsNonRoot",
+ "default": true
+ },
+ "readOnlyRootFilesystem": {
+ "type": "boolean",
+ "description": "Set Prometheus containers' Security Context readOnlyRootFilesystem",
+ "default": false
+ }
+ }
+ },
+ "existingConfigmap": {
+ "type": "string",
+ "description": "The name of an existing ConfigMap with your custom configuration for Prometheus",
+ "default": ""
+ },
+ "existingConfigmapKey": {
+ "type": "string",
+ "description": "The name of the key with the Prometheus config file",
+ "default": ""
+ },
+ "command": {
+ "type": "array",
+ "description": "Override default container command (useful when using custom images)",
+ "default": [],
+ "items": {}
+ },
+ "args": {
+ "type": "array",
+ "description": "Override default container args (useful when using custom images)",
+ "default": [],
+ "items": {}
+ },
+ "extraArgs": {
+ "type": "array",
+ "description": "Additional arguments passed to the Prometheus server container",
+ "default": [],
+ "items": {}
+ },
+ "hostAliases": {
+ "type": "array",
+ "description": "Prometheus pods host aliases",
+ "default": [],
+ "items": {}
+ },
+ "podLabels": {
+ "type": "object",
+ "description": "Extra labels for Prometheus pods",
+ "default": {}
+ },
+ "podAnnotations": {
+ "type": "object",
+ "description": "Annotations for Prometheus pods",
+ "default": {}
+ },
+ "podAffinityPreset": {
+ "type": "string",
+ "description": "Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`",
+ "default": ""
+ },
+ "podAntiAffinityPreset": {
+ "type": "string",
+ "description": "Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`",
+ "default": "soft"
+ },
+ "pdb": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean",
+ "description": "Enable/disable a Pod Disruption Budget creation",
+ "default": false
+ },
+ "minAvailable": {
+ "type": "number",
+ "description": "Minimum number/percentage of pods that should remain scheduled",
+ "default": 1
+ },
+ "maxUnavailable": {
+ "type": "string",
+ "description": "Maximum number/percentage of pods that may be made unavailable",
+ "default": ""
+ }
+ }
+ },
+ "nodeAffinityPreset": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`",
+ "default": ""
+ },
+ "key": {
+ "type": "string",
+ "description": "Node label key to match. Ignored if `affinity` is set",
+ "default": ""
+ },
+ "values": {
+ "type": "array",
+ "description": "Node label values to match. Ignored if `affinity` is set",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "affinity": {
+ "type": "object",
+ "description": "Affinity for Prometheus pods assignment",
+ "default": {}
+ },
+ "nodeSelector": {
+ "type": "object",
+ "description": "Node labels for Prometheus pods assignment",
+ "default": {}
+ },
+ "tolerations": {
+ "type": "array",
+ "description": "Tolerations for Prometheus pods assignment",
+ "default": [],
+ "items": {}
+ },
+ "updateStrategy": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Prometheus deployment strategy type. If persistence is enabled, strategy type should be set to Recreate to avoid dead locks.",
+ "default": "RollingUpdate"
+ }
+ }
+ },
+ "priorityClassName": {
+ "type": "string",
+ "description": "Prometheus pods' priorityClassName",
+ "default": ""
+ },
+ "topologySpreadConstraints": {
+ "type": "array",
+ "description": "Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template",
+ "default": [],
+ "items": {}
+ },
+ "schedulerName": {
+ "type": "string",
+ "description": "Name of the k8s scheduler (other than default) for Prometheus pods",
+ "default": ""
+ },
+ "terminationGracePeriodSeconds": {
+ "type": "string",
+ "description": "Seconds Prometheus pod needs to terminate gracefully",
+ "default": ""
+ },
+ "lifecycleHooks": {
+ "type": "object",
+ "description": "for the Prometheus container(s) to automate configuration before or after startup",
+ "default": {}
+ },
+ "extraEnvVars": {
+ "type": "array",
+ "description": "Array with extra environment variables to add to Prometheus nodes",
+ "default": [],
+ "items": {}
+ },
+ "extraEnvVarsCM": {
+ "type": "string",
+ "description": "Name of existing ConfigMap containing extra env vars for Prometheus nodes",
+ "default": ""
+ },
+ "extraEnvVarsSecret": {
+ "type": "string",
+ "description": "Name of existing Secret containing extra env vars for Prometheus nodes",
+ "default": ""
+ },
+ "extraVolumes": {
+ "type": "array",
+ "description": "Optionally specify extra list of additional volumes for the Prometheus pod(s)",
+ "default": [],
+ "items": {}
+ },
+ "extraVolumeMounts": {
+ "type": "array",
+ "description": "Optionally specify extra list of additional volumeMounts for the Prometheus container(s)",
+ "default": [],
+ "items": {}
+ },
+ "sidecars": {
+ "type": "array",
+ "description": "Add additional sidecar containers to the Prometheus pod(s)",
+ "default": [],
+ "items": {}
+ },
+ "initContainers": {
+ "type": "array",
+ "description": "Add additional init containers to the Prometheus pod(s)",
+ "default": [],
+ "items": {}
+ },
+ "routePrefix": {
+ "type": "string",
+ "description": "Prefix for the internal routes of web endpoints",
+ "default": "/"
+ },
+ "remoteWrite": {
+ "type": "array",
+ "description": "The remote_write spec configuration for Prometheus",
+ "default": [],
+ "items": {}
+ },
+ "scrapeInterval": {
+ "type": "string",
+ "description": "Interval between consecutive scrapes. Example: \"1m\"",
+ "default": ""
+ },
+ "scrapeTimeout": {
+ "type": "string",
+ "description": "Timeout for each scrape. Example: \"10s\"",
+ "default": ""
+ },
+ "evaluationInterval": {
+ "type": "string",
+ "description": "Interval between consecutive evaluations. Example: \"1m\"",
+ "default": ""
+ },
+ "enableAdminAPI": {
+ "type": "boolean",
+ "description": "Enable Prometheus administrative API",
+ "default": false
+ },
+ "enableRemoteWriteReceiver": {
+ "type": "boolean",
+ "description": "Enable Prometheus to be used as a receiver for the Prometheus remote write protocol.",
+ "default": false
+ },
+ "enableFeatures": {
+ "type": "array",
+ "description": "Enable access to Prometheus disabled features.",
+ "default": [],
+ "items": {}
+ },
+ "logLevel": {
+ "type": "string",
+ "description": "Log level for Prometheus",
+ "default": "info"
+ },
+ "logFormat": {
+ "type": "string",
+ "description": "Log format for Prometheus",
+ "default": "logfmt"
+ },
+ "retention": {
+ "type": "string",
+ "description": "Metrics retention days",
+ "default": "10d"
+ },
+ "retentionSize": {
+ "type": "string",
+ "description": "Maximum size of metrics",
+ "default": "0"
+ },
+ "alertingEndpoints": {
+ "type": "array",
+ "description": "Alertmanagers to which alerts will be sent",
+ "default": [],
+ "items": {}
+ },
+ "externalLabels": {
+ "type": "object",
+ "description": "External labels to add to any time series or alerts when communicating with external systems",
+ "default": {}
+ },
+ "thanos": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean",
+ "description": "Create a Thanos sidecar container",
+ "default": false
+ },
+ "image": {
+ "type": "object",
+ "properties": {
+ "registry": {
+ "type": "string",
+ "description": "Thanos image registry",
+ "default": "docker.io"
+ },
+ "repository": {
+ "type": "string",
+ "description": "Thanos image name",
+ "default": "bitnami/thanos"
+ },
+ "tag": {
+ "type": "string",
+ "description": "Thanos image tag",
+ "default": "0.31.0-scratch-r3"
+ },
+ "digest": {
+ "type": "string",
+ "description": "Thanos image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag",
+ "default": ""
+ },
+ "pullPolicy": {
+ "type": "string",
+ "description": "Thanos image pull policy",
+ "default": "IfNotPresent"
+ },
+ "pullSecrets": {
+ "type": "array",
+ "description": "Specify docker-registry secret names as an array",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "containerSecurityContext": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable container security context",
+ "default": true
+ },
+ "readOnlyRootFilesystem": {
+ "type": "boolean",
+ "description": "mount / (root) as a readonly filesystem",
+ "default": false
+ },
+ "allowPrivilegeEscalation": {
+ "type": "boolean",
+ "description": "Switch privilegeEscalation possibility on or off",
+ "default": false
+ },
+ "runAsNonRoot": {
+ "type": "boolean",
+ "description": "Force the container to run as a non root user",
+ "default": true
+ },
+ "capabilities": {
+ "type": "object",
+ "properties": {
+ "drop": {
+ "type": "array",
+ "description": "Linux Kernel capabilities which should be dropped",
+ "default": [
+ "ALL"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "prometheusUrl": {
+ "type": "string",
+ "description": "Override default Prometheus URL `http://localhost:9090`",
+ "default": ""
+ },
+ "extraArgs": {
+ "type": "array",
+ "description": "Additional arguments passed to the thanos sidecar container",
+ "default": [],
+ "items": {}
+ },
+ "objectStorageConfig": {
+ "type": "object",
+ "properties": {
+ "secretName": {
+ "type": "string",
+ "description": "Support mounting a Secret for the objectStorageConfig of the sideCar container.",
+ "default": ""
+ },
+ "secretKey": {
+ "type": "string",
+ "description": "Secret key with the configuration file.",
+ "default": "thanos.yaml"
+ }
+ }
+ },
+ "extraVolumeMounts": {
+ "type": "array",
+ "description": "Additional volumeMounts from `server.volumes` for thanos sidecar container",
+ "default": [],
+ "items": {}
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {
+ "type": "object",
+ "description": "The resources limits for the Thanos sidecar container",
+ "default": {}
+ },
+ "requests": {
+ "type": "object",
+ "description": "The resources requests for the Thanos sidecar container",
+ "default": {}
+ }
+ }
+ },
+ "livenessProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Turn on and off liveness probe",
+ "default": true
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Delay before liveness probe is initiated",
+ "default": 0
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "How often to perform the probe",
+ "default": 5
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "When the probe times out",
+ "default": 3
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Minimum consecutive failures for the probe",
+ "default": 120
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Minimum consecutive successes for the probe",
+ "default": 1
+ }
+ }
+ },
+ "readinessProbe": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Turn on and off readiness probe",
+ "default": true
+ },
+ "initialDelaySeconds": {
+ "type": "number",
+ "description": "Delay before readiness probe is initiated",
+ "default": 0
+ },
+ "periodSeconds": {
+ "type": "number",
+ "description": "How often to perform the probe",
+ "default": 5
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "When the probe times out",
+ "default": 3
+ },
+ "failureThreshold": {
+ "type": "number",
+ "description": "Minimum consecutive failures for the probe",
+ "default": 120
+ },
+ "successThreshold": {
+ "type": "number",
+ "description": "Minimum consecutive successes for the probe",
+ "default": 1
+ }
+ }
+ },
+ "customLivenessProbe": {
+ "type": "object",
+ "description": "Custom livenessProbe that overrides the default one",
+ "default": {}
+ },
+ "customReadinessProbe": {
+ "type": "object",
+ "description": "Custom readinessProbe that overrides the default one",
+ "default": {}
+ },
+ "service": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Kubernetes service type",
+ "default": "ClusterIP"
+ },
+ "ports": {
+ "type": "object",
+ "properties": {
+ "grpc": {
+ "type": "number",
+ "description": "Thanos service port",
+ "default": 10901
+ }
+ }
+ },
+ "clusterIP": {
+ "type": "string",
+ "description": "Specific cluster IP when service type is cluster IP. Use `None` to create headless service by default.",
+ "default": "None"
+ },
+ "nodePorts": {
+ "type": "object",
+ "properties": {
+ "grpc": {
+ "type": "string",
+ "description": "Specify the nodePort value for the LoadBalancer and NodePort service types.",
+ "default": ""
+ }
+ }
+ },
+ "loadBalancerIP": {
+ "type": "string",
+ "description": "`loadBalancerIP` if service type is `LoadBalancer`",
+ "default": ""
+ },
+ "loadBalancerSourceRanges": {
+ "type": "array",
+ "description": "Address that are allowed when svc is `LoadBalancer`",
+ "default": [],
+ "items": {}
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional annotations for Prometheus service",
+ "default": {}
+ },
+ "extraPorts": {
+ "type": "array",
+ "description": "Additional ports to expose from the Thanos sidecar container",
+ "default": [],
+ "items": {}
+ },
+ "externalTrafficPolicy": {
+ "type": "string",
+ "description": "Prometheus service external traffic policy",
+ "default": "Cluster"
+ },
+ "sessionAffinity": {
+ "type": "string",
+ "description": "Session Affinity for Kubernetes service, can be \"None\" or \"ClientIP\"",
+ "default": "None"
+ },
+ "sessionAffinityConfig": {
+ "type": "object",
+ "description": "Additional settings for the sessionAffinity",
+ "default": {}
+ }
+ }
+ },
+ "ingress": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable ingress controller resource",
+ "default": false
+ },
+ "pathType": {
+ "type": "string",
+ "description": "Ingress path type",
+ "default": "ImplementationSpecific"
+ },
+ "hostname": {
+ "type": "string",
+ "description": "Default host for the ingress record",
+ "default": "thanos.prometheus.local"
+ },
+ "path": {
+ "type": "string",
+ "description": "Default path for the ingress record",
+ "default": "/"
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.",
+ "default": {}
+ },
+ "ingressClassName": {
+ "type": "string",
+ "description": "IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)",
+ "default": ""
+ },
+ "tls": {
+ "type": "boolean",
+ "description": "Enable TLS configuration for the host defined at `ingress.hostname` parameter",
+ "default": false
+ },
+ "selfSigned": {
+ "type": "boolean",
+ "description": "Create a TLS secret for this ingress record using self-signed certificates generated by Helm",
+ "default": false
+ },
+ "extraHosts": {
+ "type": "array",
+ "description": "An array with additional hostname(s) to be covered with the ingress record",
+ "default": [],
+ "items": {}
+ },
+ "extraPaths": {
+ "type": "array",
+ "description": "An array with additional arbitrary paths that may need to be added to the ingress under the main host",
+ "default": [],
+ "items": {}
+ },
+ "extraTls": {
+ "type": "array",
+ "description": "TLS configuration for additional hostname(s) to be covered with this ingress record",
+ "default": [],
+ "items": {}
+ },
+ "secrets": {
+ "type": "array",
+ "description": "Custom TLS certificates as secrets",
+ "default": [],
+ "items": {}
+ },
+ "extraRules": {
+ "type": "array",
+ "description": "The list of additional rules to be added to this ingress record. Evaluated as a template",
+ "default": [],
+ "items": {}
+ }
+ }
+ }
+ }
+ },
+ "ingress": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable ingress record generation for Prometheus",
+ "default": false
+ },
+ "pathType": {
+ "type": "string",
+ "description": "Ingress path type",
+ "default": "ImplementationSpecific"
+ },
+ "hostname": {
+ "type": "string",
+ "description": "Default host for the ingress record",
+ "default": "server.prometheus.local"
+ },
+ "ingressClassName": {
+ "type": "string",
+ "description": "IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)",
+ "default": ""
+ },
+ "path": {
+ "type": "string",
+ "description": "Default path for the ingress record",
+ "default": "/"
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.",
+ "default": {}
+ },
+ "tls": {
+ "type": "boolean",
+ "description": "Enable TLS configuration for the host defined at `ingress.hostname` parameter",
+ "default": false
+ },
+ "selfSigned": {
+ "type": "boolean",
+ "description": "Create a TLS secret for this ingress record using self-signed certificates generated by Helm",
+ "default": false
+ },
+ "extraHosts": {
+ "type": "array",
+ "description": "An array with additional hostname(s) to be covered with the ingress record",
+ "default": [],
+ "items": {}
+ },
+ "extraPaths": {
+ "type": "array",
+ "description": "An array with additional arbitrary paths that may need to be added to the ingress under the main host",
+ "default": [],
+ "items": {}
+ },
+ "extraTls": {
+ "type": "array",
+ "description": "TLS configuration for additional hostname(s) to be covered with this ingress record",
+ "default": [],
+ "items": {}
+ },
+ "secrets": {
+ "type": "array",
+ "description": "Custom TLS certificates as secrets",
+ "default": [],
+ "items": {}
+ },
+ "extraRules": {
+ "type": "array",
+ "description": "Additional rules to be covered with this ingress record",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "serviceAccount": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean",
+ "description": "Specifies whether a ServiceAccount should be created",
+ "default": true
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the ServiceAccount to use.",
+ "default": ""
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional Service Account annotations (evaluated as a template)",
+ "default": {}
+ },
+ "automountServiceAccountToken": {
+ "type": "boolean",
+ "description": "Automount service account token for the server service account",
+ "default": true
+ }
+ }
+ },
+ "service": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Prometheus service type",
+ "default": "LoadBalancer"
+ },
+ "ports": {
+ "type": "object",
+ "properties": {
+ "http": {
+ "type": "number",
+ "description": "Prometheus service HTTP port",
+ "default": 80
+ }
+ }
+ },
+ "nodePorts": {
+ "type": "object",
+ "properties": {
+ "http": {
+ "type": "string",
+ "description": "Node port for HTTP",
+ "default": ""
+ }
+ }
+ },
+ "clusterIP": {
+ "type": "string",
+ "description": "Prometheus service Cluster IP",
+ "default": ""
+ },
+ "loadBalancerIP": {
+ "type": "string",
+ "description": "Prometheus service Load Balancer IP",
+ "default": ""
+ },
+ "loadBalancerSourceRanges": {
+ "type": "array",
+ "description": "Prometheus service Load Balancer sources",
+ "default": [],
+ "items": {}
+ },
+ "externalTrafficPolicy": {
+ "type": "string",
+ "description": "Prometheus service external traffic policy",
+ "default": "Cluster"
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Additional custom annotations for Prometheus service",
+ "default": {}
+ },
+ "extraPorts": {
+ "type": "array",
+ "description": "Extra ports to expose in Prometheus service (normally used with the `sidecars` value)",
+ "default": [],
+ "items": {}
+ },
+ "sessionAffinity": {
+ "type": "string",
+ "description": "Control where client requests go, to the same pod or round-robin. ClientIP by default.",
+ "default": "ClientIP"
+ },
+ "sessionAffinityConfig": {
+ "type": "object",
+ "description": "Additional settings for the sessionAffinity",
+ "default": {}
+ }
+ }
+ },
+ "persistence": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable persistence using Persistent Volume Claims. If you have multiple instances (server.replicaCount > 1), please consider using an external storage service like Thanos or Grafana Mimir",
+ "default": false
+ },
+ "mountPath": {
+ "type": "string",
+ "description": "Path to mount the volume at.",
+ "default": "/bitnami/prometheus/data"
+ },
+ "subPath": {
+ "type": "string",
+ "description": "The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services",
+ "default": ""
+ },
+ "storageClass": {
+ "type": "string",
+ "description": "Storage class of backing PVC",
+ "default": ""
+ },
+ "annotations": {
+ "type": "object",
+ "description": "Persistent Volume Claim annotations",
+ "default": {}
+ },
+ "accessModes": {
+ "type": "array",
+ "description": "Persistent Volume Access Modes",
+ "default": [
+ "ReadWriteOnce"
+ ],
+ "items": {
+ "type": "string"
+ }
+ },
+ "size": {
+ "type": "string",
+ "description": "Size of data volume",
+ "default": "8Gi"
+ },
+ "existingClaim": {
+ "type": "string",
+ "description": "The name of an existing PVC to use for persistence",
+ "default": ""
+ },
+ "selector": {
+ "type": "object",
+ "description": "Selector to match an existing Persistent Volume for Prometheus data PVC",
+ "default": {}
+ },
+ "dataSource": {
+ "type": "object",
+ "description": "Custom PVC data source",
+ "default": {}
+ }
+ }
+ },
+ "rbac": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean",
+ "description": "Specifies whether RBAC resources should be created",
+ "default": true
+ },
+ "rules": {
+ "type": "array",
+ "description": "Custom RBAC rules to set",
+ "default": [],
+ "items": {}
+ }
+ }
+ }
+ }
+ },
+ "volumePermissions": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`",
+ "default": false
+ },
+ "image": {
+ "type": "object",
+ "properties": {
+ "registry": {
+ "type": "string",
+ "description": "Bitnami Shell image registry",
+ "default": "docker.io"
+ },
+ "repository": {
+ "type": "string",
+ "description": "Bitnami Shell image repository",
+ "default": "bitnami/bitnami-shell"
+ },
+ "tag": {
+ "type": "string",
+ "description": "Bitnami Shell image tag (immutable tags are recommended)",
+ "default": "11-debian-11-r99"
+ },
+ "pullPolicy": {
+ "type": "string",
+ "description": "Bitnami Shell image pull policy",
+ "default": "IfNotPresent"
+ },
+ "pullSecrets": {
+ "type": "array",
+ "description": "Bitnami Shell image pull secrets",
+ "default": [],
+ "items": {}
+ }
+ }
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {
+ "type": "object",
+ "description": "The resources limits for the init container",
+ "default": {}
+ },
+ "requests": {
+ "type": "object",
+ "description": "The requested resources for the init container",
+ "default": {}
+ }
+ }
+ },
+ "containerSecurityContext": {
+ "type": "object",
+ "properties": {
+ "runAsUser": {
+ "type": "number",
+ "description": "Set init container's Security Context runAsUser",
+ "default": 0
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/bitnami/prometheus/values.yaml b/bitnami/prometheus/values.yaml
new file mode 100644
index 0000000000..52b044907b
--- /dev/null
+++ b/bitnami/prometheus/values.yaml
@@ -0,0 +1,1508 @@
+## @section Global parameters
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+ imageRegistry: ""
+ ## E.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+ storageClass: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.name
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname
+##
+fullnameOverride: ""
+## @param namespaceOverride String to fully override common.names.namespace
+##
+namespaceOverride: ""
+## @param commonLabels Labels to add to all deployed objects
+##
+commonLabels: {}
+## @param commonAnnotations Annotations to add to all deployed objects
+##
+commonAnnotations: {}
+## @param clusterDomain Kubernetes cluster domain name
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
+
+## Enable diagnostic mode in the deployment
+##
+diagnosticMode:
+ ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+ ##
+ enabled: false
+ ## @param diagnosticMode.command Command to override all containers in the deployment
+ ##
+ command:
+ - sleep
+ ## @param diagnosticMode.args Args to override all containers in the deployment
+ ##
+ args:
+ - infinity
+## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
+##
+ingress:
+ apiVersion: ""
+
+## @section Alertmanager Parameters
+##
+## Bitnami Alertmanager image
+## ref: https://hub.docker.com/r/bitnami/alertmanager/tags/
+## @param alertmanager.enabled Alertmanager enabled
+## @param alertmanager.image.registry Alertmanager image registry
+## @param alertmanager.image.repository Alertmanager image repository
+## @param alertmanager.image.tag Alertmanager image tag (immutable tags are recommended)
+## @param alertmanager.image.digest Alertmanager image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended)
+## @param alertmanager.image.pullPolicy Alertmanager image pull policy
+## @param alertmanager.image.pullSecrets Alertmanager image pull secrets
+##
+alertmanager:
+ enabled: true
+ image:
+ registry: docker.io
+ repository: bitnami/alertmanager
+ tag: 0.25.0-debian-11-r48
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+
 + ## @param alertmanager.configuration [string] Alertmanager configuration. This content will be stored in the alertmanager.yaml file and the content can be a template.
+ ## ref:
+ ##
+ configuration: |
+ receivers:
+ - name: default-receiver
+ route:
+ group_wait: 10s
+ group_interval: 5m
+ receiver: default-receiver
+ repeat_interval: 3h
+
+ ## @param alertmanager.replicaCount Number of Alertmanager replicas to deploy
+ ##
+ replicaCount: 1
+ ## @param alertmanager.containerPorts.http Alertmanager HTTP container port
+ ## @param alertmanager.containerPorts.cluster Alertmanager Cluster HA port
+ ##
+ containerPorts:
+ http: 9093
+ cluster: 9094
+ ## Configure extra options for Alertmanager containers' liveness and readiness probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param alertmanager.livenessProbe.enabled Enable livenessProbe on Alertmanager containers
+ ## @param alertmanager.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param alertmanager.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param alertmanager.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param alertmanager.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param alertmanager.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 20
+ timeoutSeconds: 3
+ failureThreshold: 3
+ successThreshold: 1
+ ## @param alertmanager.readinessProbe.enabled Enable readinessProbe on Alertmanager containers
+ ## @param alertmanager.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param alertmanager.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param alertmanager.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param alertmanager.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param alertmanager.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 2
+ failureThreshold: 5
+ successThreshold: 1
+ ## @param alertmanager.startupProbe.enabled Enable startupProbe on Alertmanager containers
+ ## @param alertmanager.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param alertmanager.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param alertmanager.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param alertmanager.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param alertmanager.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 2
+ periodSeconds: 5
+ timeoutSeconds: 2
+ failureThreshold: 10
+ successThreshold: 1
+ ## @param alertmanager.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param alertmanager.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param alertmanager.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## Alertmanager resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param alertmanager.resources.limits The resources limits for the Alertmanager containers
+ ## @param alertmanager.resources.requests The requested resources for the Alertmanager containers
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Configure Pods Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param alertmanager.podSecurityContext.enabled Enabled Alertmanager pods' Security Context
+ ## @param alertmanager.podSecurityContext.fsGroup Set Alertmanager pod's Security Context fsGroup
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param alertmanager.containerSecurityContext.enabled Enabled Alertmanager containers' Security Context
+ ## @param alertmanager.containerSecurityContext.runAsUser Set Alertmanager containers' Security Context runAsUser
+ ## @param alertmanager.containerSecurityContext.runAsNonRoot Set Alertmanager containers' Security Context runAsNonRoot
 + ## @param alertmanager.containerSecurityContext.readOnlyRootFilesystem Set Alertmanager containers' Security Context readOnlyRootFilesystem
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsNonRoot: true
+ readOnlyRootFilesystem: false
+
+ ## @param alertmanager.existingConfigmap The name of an existing ConfigMap with your custom configuration for Alertmanager
+ ##
+ existingConfigmap: ""
+ ## @param alertmanager.existingConfigmapKey The name of the key with the Alertmanager config file
+ ##
+ existingConfigmapKey: ""
+ ## @param alertmanager.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param alertmanager.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## @param alertmanager.extraArgs Additional arguments passed to the Prometheus server container
+ ## extraArgs:
+ ## - --log.level=debug
+ ## - --tsdb.path=/data/
+ ##
+ extraArgs: []
+ ## @param alertmanager.hostAliases Alertmanager pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param alertmanager.podLabels Extra labels for Alertmanager pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ ##
+ podLabels: {}
+ ## @param alertmanager.podAnnotations Annotations for Alertmanager pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations: {}
+ ## @param alertmanager.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param alertmanager.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## Pod Disruption Budget configuration
+ ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
+ ## @param alertmanager.pdb.create Enable/disable a Pod Disruption Budget creation
+ ## @param alertmanager.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
+ ## @param alertmanager.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable
+ ##
+ pdb:
+ create: false
+ minAvailable: 1
+ maxUnavailable: ""
+
+ ## Node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param alertmanager.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param alertmanager.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
+ ##
+ key: ""
+ ## @param alertmanager.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param alertmanager.affinity Affinity for Alertmanager pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## NOTE: `podAffinityPreset`, `podAntiAffinityPreset`, and `nodeAffinityPreset` will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param alertmanager.nodeSelector Node labels for Alertmanager pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param alertmanager.tolerations Tolerations for Alertmanager pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param alertmanager.updateStrategy.type Alertmanager statefulset strategy type
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ##
+ updateStrategy:
+ ## StrategyType
+ ## Can be set to RollingUpdate or OnDelete
+ ##
+ type: RollingUpdate
+
+ ## @param alertmanager.podManagementPolicy Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+ ##
+ podManagementPolicy: OrderedReady
+ ## @param alertmanager.priorityClassName Alertmanager pods' priorityClassName
+ ##
+ priorityClassName: ""
+ ## @param alertmanager.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param alertmanager.schedulerName Name of the k8s scheduler (other than default) for Alertmanager pods
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
 + ## @param alertmanager.terminationGracePeriodSeconds Seconds Alertmanager pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param alertmanager.lifecycleHooks for the Alertmanager container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param alertmanager.extraEnvVars Array with extra environment variables to add to Alertmanager nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param alertmanager.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Alertmanager nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param alertmanager.extraEnvVarsSecret Name of existing Secret containing extra env vars for Alertmanager nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param alertmanager.extraVolumes Optionally specify extra list of additional volumes for the Alertmanager pod(s)
+ ##
+ extraVolumes: []
+ ## @param alertmanager.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Alertmanager container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param alertmanager.sidecars Add additional sidecar containers to the Alertmanager pod(s)
+ ## e.g:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param alertmanager.initContainers Add additional init containers to the Alertmanager pod(s)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ ## e.g:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## command: ['sh', '-c', 'echo "hello world"']
+ ##
+ initContainers: []
+
+ ## Alertmanager ingress parameters
+ ## ref: http://kubernetes.io/docs/user-guide/ingress/
+ ##
+ ingress:
+ ## @param alertmanager.ingress.enabled Enable ingress record generation for Alertmanager
+ ##
+ enabled: false
+ ## @param alertmanager.ingress.pathType Ingress path type
+ ##
+ pathType: ImplementationSpecific
+
+ ## @param alertmanager.ingress.hostname Default host for the ingress record
+ ##
+ hostname: alertmanager.prometheus.local
 + ## @param alertmanager.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
+ ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+ ##
+ ingressClassName: ""
+ ## @param alertmanager.ingress.path Default path for the ingress record
+ ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
+ ##
+ path: /
+ ## @param alertmanager.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+ ## Use this parameter to set the required annotations for cert-manager, see
+ ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+ ## e.g:
+ ## annotations:
+ ## kubernetes.io/ingress.class: nginx
+ ## cert-manager.io/cluster-issuer: cluster-issuer-name
+ ##
+ annotations: {}
+ ## @param alertmanager.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
+ ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
+ ## You can:
+ ## - Use the `ingress.secrets` parameter to create this TLS secret
+ ## - Rely on cert-manager to create it by setting the corresponding annotations
+ ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+ ##
+ tls: false
+ ## @param alertmanager.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+ ##
+ selfSigned: false
+ ## @param alertmanager.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
+ ## e.g:
+ ## extraHosts:
+ ## - name: prometheus.local
+ ## path: /
+ ##
+ extraHosts: []
+ ## @param alertmanager.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
+ ## e.g:
+ ## extraPaths:
+ ## - path: /*
+ ## backend:
+ ## serviceName: ssl-redirect
+ ## servicePort: use-annotation
+ ##
+ extraPaths: []
+ ## @param alertmanager.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+ ## e.g:
+ ## extraTls:
+ ## - hosts:
+ ## - prometheus.local
+ ## secretName: prometheus.local-tls
+ ##
+ extraTls: []
+ ## @param alertmanager.ingress.secrets Custom TLS certificates as secrets
+ ## NOTE: 'key' and 'certificate' are expected in PEM format
+ ## NOTE: 'name' should line up with a 'secretName' set further up
+ ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
+ ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
+ ## It is also possible to create and manage the certificates outside of this helm chart
+ ## Please see README.md for more information
+ ## e.g:
+ ## secrets:
+ ## - name: prometheus.local-tls
+ ## key: |-
+ ## -----BEGIN RSA PRIVATE KEY-----
+ ## ...
+ ## -----END RSA PRIVATE KEY-----
+ ## certificate: |-
+ ## -----BEGIN CERTIFICATE-----
+ ## ...
+ ## -----END CERTIFICATE-----
+ ##
+ secrets: []
+ ## @param alertmanager.ingress.extraRules Additional rules to be covered with this ingress record
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
+ ## e.g:
+ ## extraRules:
+ ## - host: example.local
+ ## http:
+ ## path: /
+ ## backend:
+ ## service:
+ ## name: example-svc
+ ## port:
+ ## name: http
+ ##
+ extraRules: []
+
+ ## ServiceAccount configuration
+ ##
+ serviceAccount:
+ ## @param alertmanager.serviceAccount.create Specifies whether a ServiceAccount should be created
+ ##
+ create: true
+ ## @param alertmanager.serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param alertmanager.serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
+ ##
+ annotations: {}
+ ## @param alertmanager.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
+ ##
+ automountServiceAccountToken: true
+
+ ## Alertmanager service parameters
+ ##
+ service:
+ ## @param alertmanager.service.type Alertmanager service type
+ ##
+ type: LoadBalancer
+ ## @param alertmanager.service.ports.http Alertmanager service HTTP port
+ ## @param alertmanager.service.ports.cluster Alertmanager cluster HA port
+ ##
+ ports:
+ http: 80
+ cluster: 9094
+ ## Node ports to expose
+ ## @param alertmanager.service.nodePorts.http Node port for HTTP
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ http: ""
+ ## @param alertmanager.service.clusterIP Alertmanager service Cluster IP
+ ## e.g.:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param alertmanager.service.loadBalancerIP Alertmanager service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+ ##
+ loadBalancerIP: ""
+ ## @param alertmanager.service.loadBalancerSourceRanges Alertmanager service Load Balancer sources
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param alertmanager.service.externalTrafficPolicy Alertmanager service external traffic policy
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param alertmanager.service.annotations Additional custom annotations for Alertmanager service
+ ##
+ annotations: {}
+ ## @param alertmanager.service.extraPorts Extra ports to expose in Alertmanager service (normally used with the `sidecars` value)
+ ##
+ extraPorts: []
+ ## @param alertmanager.service.sessionAffinity Control where client requests go, to the same pod or round-robin
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/user-guide/services/
+ ##
+ sessionAffinity: None
+ ## @param alertmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+
+ persistence:
+ ## @param alertmanager.persistence.enabled Enable Alertmanager data persistence using VolumeClaimTemplates
+ ##
+ enabled: false
+ ## @param alertmanager.persistence.mountPath Path to mount the volume at.
+ ##
+ mountPath: /bitnami/alertmanager/data
+ ## @param alertmanager.persistence.subPath The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
 + ## @param alertmanager.persistence.storageClass PVC Storage Class for Alertmanager data volume
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
 + ## @param alertmanager.persistence.accessModes PVC Access Mode for Alertmanager data volume
+ ##
+ accessModes:
+ - ReadWriteOnce
 + ## @param alertmanager.persistence.size PVC Storage Request for Alertmanager data volume
+ ##
+ size: 8Gi
+ ## @param alertmanager.persistence.annotations Annotations for the PVC
+ ##
+ annotations: {}
+ ## @param alertmanager.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+
+## @section Prometheus server Parameters
+##
+## Bitnami Prometheus image
+## ref: https://hub.docker.com/r/bitnami/prometheus/tags/
+## @param server.image.registry Prometheus image registry
+## @param server.image.repository Prometheus image repository
+## @param server.image.tag Prometheus image tag (immutable tags are recommended)
+## @param server.image.digest Prometheus image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended)
+## @param server.image.pullPolicy Prometheus image pull policy
+## @param server.image.pullSecrets Prometheus image pull secrets
+##
+server:
+ image:
+ registry: docker.io
+ repository: bitnami/prometheus
+ tag: 2.44.0-debian-11-r0
+ digest: ""
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+
 + ## @param server.configuration [string] Prometheus configuration. This content will be stored in the prometheus.yaml file and the content can be a template.
+ ## ref:
+ ##
+ configuration: |
+ global:
+ {{- if .Values.server.scrapeInterval }}
+ scrape_interval: {{ .Values.server.scrapeInterval }}
+ {{- end }}
+ {{- if .Values.server.scrapeTimeout }}
+ scrape_timeout: {{ .Values.server.scrapeTimeout }}
+ {{- end }}
+ {{- if .Values.server.evaluationInterval }}
+ evaluation_interval: {{ .Values.server.evaluationInterval }}
+ {{- end }}
+ external_labels:
+ monitor: {{ template "common.names.fullname" . }}
+ {{- if .Values.server.externalLabels }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.externalLabels "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.server.remoteWrite }}
+ remote_write: {{- include "common.tplvalues.render" (dict "value" .Values.server.remoteWrite "context" $) | nindent 4 }}
+ {{- end }}
+ scrape_configs:
+ - job_name: prometheus
+ {{- include "prometheus.scrape_config" (dict "component" "server" "context" $) | nindent 4 }}
+ {{- if .Values.alertmanager.enabled }}
+ - job_name: alertmanager
+ {{- include "prometheus.scrape_config" (dict "component" "alertmanager" "context" $) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.server.extraScrapeConfigs}}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.extraScrapeConfigs "context" $) | nindent 2 }}
+ {{- end }}
+ {{- if or .Values.alertmanager.enabled .Values.server.alertingEndpoints}}
+ alerting:
+ alertmanagers:
+ {{- if .Values.server.alertingEndpoints }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.server.alertingEndpoints "context" $) | nindent 4 }}
+ {{- end }}
+ - scheme: HTTP
+ static_configs:
+ - targets: [ {{ printf "%s:%d" (include "prometheus.alertmanager.fullname" .) (int .Values.alertmanager.service.ports.http) }}]
+ rule_files:
+ - rules.yaml
+ {{- end }}
+
+
 + ## @param server.alertingRules Prometheus alerting rules. This content will be stored in the rules.yaml file and the content can be a template.
+ ## ref:
+ ##
+ alertingRules: {}
 + ## @param server.extraScrapeConfigs Prometheus configuration, useful to declare new scrape_configs. This content will be merged with the 'server.configuration' value and stored in the prometheus.yaml file.
+ ## ref:
+ ##
+ extraScrapeConfigs: []
+ ## @param server.replicaCount Number of Prometheus replicas to deploy
+ ##
+ replicaCount: 1
+ ## @param server.containerPorts.http Prometheus HTTP container port
+ ##
+ containerPorts:
+ http: 9090
+ ## Configure extra options for Prometheus containers' liveness and readiness probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param server.livenessProbe.enabled Enable livenessProbe on Prometheus containers
+ ## @param server.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param server.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param server.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param server.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param server.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 20
+ timeoutSeconds: 3
+ failureThreshold: 3
+ successThreshold: 1
+ ## @param server.readinessProbe.enabled Enable readinessProbe on Prometheus containers
+ ## @param server.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param server.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param server.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param server.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param server.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 2
+ failureThreshold: 5
+ successThreshold: 1
+ ## @param server.startupProbe.enabled Enable startupProbe on Prometheus containers
+ ## @param server.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+ ## @param server.startupProbe.periodSeconds Period seconds for startupProbe
+ ## @param server.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+ ## @param server.startupProbe.failureThreshold Failure threshold for startupProbe
+ ## @param server.startupProbe.successThreshold Success threshold for startupProbe
+ ##
+ startupProbe:
+ enabled: false
+ initialDelaySeconds: 2
+ periodSeconds: 5
+ timeoutSeconds: 2
+ failureThreshold: 10
+ successThreshold: 1
+ ## @param server.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param server.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## @param server.customStartupProbe Custom startupProbe that overrides the default one
+ ##
+ customStartupProbe: {}
+ ## Prometheus resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param server.resources.limits The resources limits for the Prometheus containers
+ ## @param server.resources.requests The requested resources for the Prometheus containers
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Configure Pods Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ ## @param server.podSecurityContext.enabled Enabled Prometheus pods' Security Context
+ ## @param server.podSecurityContext.fsGroup Set Prometheus pod's Security Context fsGroup
+ ##
+ podSecurityContext:
+ enabled: true
+ fsGroup: 1001
+ ## Configure Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param server.containerSecurityContext.enabled Enabled Prometheus containers' Security Context
+ ## @param server.containerSecurityContext.runAsUser Set Prometheus containers' Security Context runAsUser
+ ## @param server.containerSecurityContext.runAsNonRoot Set Prometheus containers' Security Context runAsNonRoot
 + ## @param server.containerSecurityContext.readOnlyRootFilesystem Set Prometheus containers' Security Context readOnlyRootFilesystem
+ ##
+ containerSecurityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsNonRoot: true
+ readOnlyRootFilesystem: false
+
+ ## @param server.existingConfigmap The name of an existing ConfigMap with your custom configuration for Prometheus
+ ##
+ existingConfigmap: ""
+ ## @param server.existingConfigmapKey The name of the key with the Prometheus config file
+ ##
+ existingConfigmapKey: ""
+ ## @param server.command Override default container command (useful when using custom images)
+ ##
+ command: []
+ ## @param server.args Override default container args (useful when using custom images)
+ ##
+ args: []
+ ## @param server.extraArgs Additional arguments passed to the Prometheus server container
+ ## extraArgs:
+ ## - --log.level=debug
+ ## - --tsdb.path=/data/
+ ##
+ extraArgs: []
+ ## @param server.hostAliases Prometheus pods host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param server.podLabels Extra labels for Prometheus pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ ##
+ podLabels: {}
+ ## @param server.podAnnotations Annotations for Prometheus pods
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations: {}
+ ## @param server.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param server.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## Pod Disruption Budget configuration
+ ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
+ ## @param server.pdb.create Enable/disable a Pod Disruption Budget creation
+ ## @param server.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
+ ## @param server.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable
+ ##
+ pdb:
+ create: false
+ minAvailable: 1
+ maxUnavailable: ""
+ ## Node affinity preset
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param server.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param server.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
+ ##
+ key: ""
+ ## @param server.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param server.affinity Affinity for Prometheus pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## NOTE: `podAffinityPreset`, `podAntiAffinityPreset`, and `nodeAffinityPreset` will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param server.nodeSelector Node labels for Prometheus pods assignment
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param server.tolerations Tolerations for Prometheus pods assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param server.updateStrategy.type Prometheus deployment strategy type. If persistence is enabled, strategy type should be set to Recreate to avoid dead locks.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+ ##
+ updateStrategy:
+ ## StrategyType
+ ## Can be set to RollingUpdate or Recreate
+ ##
+ type: RollingUpdate
+
+ ## @param server.priorityClassName Prometheus pods' priorityClassName
+ ##
+ priorityClassName: ""
+ ## @param server.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param server.schedulerName Name of the k8s scheduler (other than default) for Prometheus pods
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param server.terminationGracePeriodSeconds Seconds Prometheus pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param server.lifecycleHooks for the Prometheus container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param server.extraEnvVars Array with extra environment variables to add to Prometheus nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param server.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Prometheus nodes
+ ##
+ extraEnvVarsCM: ""
+ ## @param server.extraEnvVarsSecret Name of existing Secret containing extra env vars for Prometheus nodes
+ ##
+ extraEnvVarsSecret: ""
+ ## @param server.extraVolumes Optionally specify extra list of additional volumes for the Prometheus pod(s)
+ ##
+ extraVolumes: []
+ ## @param server.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Prometheus container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param server.sidecars Add additional sidecar containers to the Prometheus pod(s)
+ ## e.g:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param server.initContainers Add additional init containers to the Prometheus pod(s)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ ## e.g:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## command: ['sh', '-c', 'echo "hello world"']
+ ##
+ initContainers: []
+ ## @param server.routePrefix Prefix for the internal routes of web endpoints
+ ##
+ routePrefix: /
+ ## @param server.remoteWrite The remote_write spec configuration for Prometheus
+ ##
+ remoteWrite: []
+ ## @param server.scrapeInterval Interval between consecutive scrapes. Example: "1m"
+ ##
+ scrapeInterval: ""
+ ## @param server.scrapeTimeout Timeout after which a scrape is considered failed. Example: "10s"
+ ##
+ scrapeTimeout: ""
+ ## @param server.evaluationInterval Interval between consecutive evaluations. Example: "1m"
+ ##
+ evaluationInterval: ""
+ ## @param server.enableAdminAPI Enable Prometheus administrative API
+ ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
+ ##
+ enableAdminAPI: false
+ ## @param server.enableRemoteWriteReceiver Enable Prometheus to be used as a receiver for the Prometheus remote write protocol.
+ ##
+ enableRemoteWriteReceiver: false
+ ## @param server.enableFeatures Enable access to Prometheus disabled features.
+ ## ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
+ ##
+ enableFeatures: []
+ ## @param server.logLevel Log level for Prometheus
+ ##
+ logLevel: info
+ ## @param server.logFormat Log format for Prometheus
+ ##
+ logFormat: logfmt
+ ## @param server.retention Metrics retention days
+ ##
+ retention: 10d
+ ## @param server.retentionSize Maximum size of metrics
+ ##
+ retentionSize: "0"
+ ## @param server.alertingEndpoints Alertmanagers to which alerts will be sent
+ ## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config
+ ##
+ alertingEndpoints: []
+ ## @param server.externalLabels External labels to add to any time series or alerts when communicating with external systems
+ ##
+ externalLabels: {}
+
+ ## Thanos sidecar container configuration
+ ##
+ thanos:
+ ## @param server.thanos.create Create a Thanos sidecar container
+ ##
+ create: false
+ ## Bitnami Thanos image
+ ## ref: https://hub.docker.com/r/bitnami/thanos/tags/
+ ## @param server.thanos.image.registry Thanos image registry
+ ## @param server.thanos.image.repository Thanos image name
+ ## @param server.thanos.image.tag Thanos image tag
+ ## @param server.thanos.image.digest Thanos image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+ ## @param server.thanos.image.pullPolicy Thanos image pull policy
+ ## @param server.thanos.image.pullSecrets Specify docker-registry secret names as an array
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/thanos
+ tag: 0.31.0-scratch-r3
+ digest: ""
+ ## Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## Example:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Thanos Sidecar container's securityContext
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param server.thanos.containerSecurityContext.enabled Enable container security context
+ ## @param server.thanos.containerSecurityContext.readOnlyRootFilesystem mount / (root) as a readonly filesystem
+ ## @param server.thanos.containerSecurityContext.allowPrivilegeEscalation Switch privilegeEscalation possibility on or off
+ ## @param server.thanos.containerSecurityContext.runAsNonRoot Force the container to run as a non root user
+ ## @param server.thanos.containerSecurityContext.capabilities.drop [array] Linux Kernel capabilities which should be dropped
+ ##
+ containerSecurityContext:
+ enabled: true
+ readOnlyRootFilesystem: false
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ capabilities:
+ drop:
+ - ALL
+ ## @param server.thanos.prometheusUrl Override default prometheus url `http://localhost:9090`
+ ##
+ prometheusUrl: ""
+ ## @param server.thanos.extraArgs Additional arguments passed to the thanos sidecar container
+ ## extraArgs:
+ ## - --log.level=debug
+ ## - --tsdb.path=/data/
+ ##
+ extraArgs: []
+ ## @param server.thanos.objectStorageConfig.secretName Support mounting a Secret for the objectStorageConfig of the sideCar container.
+ ## @param server.thanos.objectStorageConfig.secretKey Secret key with the configuration file.
+ ## ref: https://github.com/thanos-io/thanos/blob/main/docs/storage.md
+ ## objectStorageConfig:
+ ## secretName: thanos-objstore-config
+ ## secretKey: thanos.yaml
+ ##
+ objectStorageConfig:
+ secretName: ""
+ secretKey: thanos.yaml
+ ## ref: https://github.com/thanos-io/thanos/blob/main/docs/components/sidecar.md
+ ## @param server.thanos.extraVolumeMounts Additional volumeMounts from `server.volumes` for thanos sidecar container
+ ## extraVolumeMounts:
+ ## - name: my-secret-volume
+ ## mountPath: /etc/thanos/secrets/my-secret
+ ##
+ extraVolumeMounts: []
+ ## Thanos sidecar container resource requests and limits.
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## We usually recommend not to specify default resources and to leave this as a conscious
+ ## choice for the user. This also increases chances charts run on environments with little
+ ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+ ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ ## @param server.thanos.resources.limits The resources limits for the Thanos sidecar container
+ ## @param server.thanos.resources.requests The resources requests for the Thanos sidecar container
+ ##
+ resources:
+ ## Example:
+ ## limits:
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+ limits: {}
+ ## Examples:
+ ## requests:
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+ requests: {}
+ ## Configure extra options for liveness probe
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param server.thanos.livenessProbe.enabled Turn on and off liveness probe
+ ## @param server.thanos.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
+ ## @param server.thanos.livenessProbe.periodSeconds How often to perform the probe
+ ## @param server.thanos.livenessProbe.timeoutSeconds When the probe times out
+ ## @param server.thanos.livenessProbe.failureThreshold Minimum consecutive failures for the probe
+ ## @param server.thanos.livenessProbe.successThreshold Minimum consecutive successes for the probe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 0
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 120
+ successThreshold: 1
+ ## Configure extra options for readiness probe
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param server.thanos.readinessProbe.enabled Turn on and off readiness probe
+ ## @param server.thanos.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
+ ## @param server.thanos.readinessProbe.periodSeconds How often to perform the probe
+ ## @param server.thanos.readinessProbe.timeoutSeconds When the probe times out
+ ## @param server.thanos.readinessProbe.failureThreshold Minimum consecutive failures for the probe
+ ## @param server.thanos.readinessProbe.successThreshold Minimum consecutive successes for the probe
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 0
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 120
+ successThreshold: 1
+ ## @param server.thanos.customLivenessProbe Custom livenessProbe that overrides the default one
+ ##
+ customLivenessProbe: {}
+ ## @param server.thanos.customReadinessProbe Custom readinessProbe that overrides the default one
+ ##
+ customReadinessProbe: {}
+ ## Thanos Sidecar Service
+ ##
+ service:
+ ## @param server.thanos.service.type Kubernetes service type
+ ##
+ type: ClusterIP
+ ## @param server.thanos.service.ports.grpc Thanos service port
+ ##
+ ports:
+ grpc: 10901
+ ## @param server.thanos.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` to create headless service by default.
+ ## Use a "headless" service by default so it returns every pod's IP instead of loadbalancing requests.
+ ##
+ clusterIP: None
+ ## @param server.thanos.service.nodePorts.grpc Specify the nodePort value for the LoadBalancer and NodePort service types.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ## e.g:
+ ## nodePort: 30901
+ ##
+ nodePorts:
+ grpc: ""
+ ## @param server.thanos.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## @param server.thanos.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param server.thanos.service.annotations Additional annotations for the Thanos sidecar service
+ ##
+ annotations: {}
+ ## @param server.thanos.service.extraPorts Additional ports to expose from the Thanos sidecar container
+ ## extraPorts:
+ ## - name: http
+ ## port: 10902
+ ## targetPort: http
+ ## protocol: TCP
+ ##
+ extraPorts: []
+ ## @param server.thanos.service.externalTrafficPolicy Thanos sidecar service external traffic policy
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param server.thanos.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
+ ## If "ClientIP", consecutive client requests will be directed to the same Pod
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ##
+ sessionAffinity: None
+ ## @param server.thanos.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+
+ ## Configure the ingress resource that allows you to access the
+ ## Thanos Sidecar installation. Set up the URL
+ ## ref: https://kubernetes.io/docs/user-guide/ingress/
+ ##
+ ingress:
+ ## @param server.thanos.ingress.enabled Enable ingress controller resource
+ ##
+ enabled: false
+ ## @param server.thanos.ingress.pathType Ingress path type
+ ##
+ pathType: ImplementationSpecific
+ ## @param server.thanos.ingress.hostname Default host for the ingress record
+ ##
+ hostname: thanos.prometheus.local
+ ## @param server.thanos.ingress.path Default path for the ingress record
+ ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
+ ##
+ path: /
+ ## @param server.thanos.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+ ## For a full list of possible ingress annotations, please see
+ ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+ ## Use this parameter to set the required annotations for cert-manager, see
+ ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+ ##
+ ## Examples:
+ ## kubernetes.io/ingress.class: nginx
+ ## cert-manager.io/cluster-issuer: cluster-issuer-name
+ ##
+ annotations: {}
+ ## @param server.thanos.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
+ ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+ ##
+ ingressClassName: ""
+ ## @param server.thanos.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
+ ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
+ ## You can:
+ ## - Use the `ingress.secrets` parameter to create this TLS secret
+ ## - Rely on cert-manager to create it by setting `ingress.certManager=true`
+ ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+ ##
+ tls: false
+ ## @param server.thanos.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+ ##
+ selfSigned: false
+ ## @param server.thanos.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
+ ## e.g:
+ ## extraHosts:
+ ## - name: thanos.prometheus.local
+ ## path: /
+ ##
+ extraHosts: []
+ ## @param server.thanos.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
+ ## e.g:
+ ## extraPaths:
+ ## - path: /*
+ ## backend:
+ ## serviceName: ssl-redirect
+ ## servicePort: use-annotation
+ ##
+ extraPaths: []
+ ## @param server.thanos.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+ ## e.g:
+ ## extraTls:
+ ## - hosts:
+ ## - thanos.prometheus.local
+ ## secretName: thanos.prometheus.local-tls
+ ##
+ extraTls: []
+ ## @param server.thanos.ingress.secrets Custom TLS certificates as secrets
+ ## NOTE: 'key' and 'certificate' are expected in PEM format
+ ## NOTE: 'name' should line up with a 'secretName' set further up
+ ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
+ ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
+ ## It is also possible to create and manage the certificates outside of this helm chart
+ ## Please see README.md for more information
+ ## e.g:
+ ## secrets:
+ ## - name: thanos.prometheus.local-tls
+ ## key: |-
+ ## -----BEGIN RSA PRIVATE KEY-----
+ ## ...
+ ## -----END RSA PRIVATE KEY-----
+ ## certificate: |-
+ ## -----BEGIN CERTIFICATE-----
+ ## ...
+ ## -----END CERTIFICATE-----
+ ##
+ secrets: []
+ ## @param server.thanos.ingress.extraRules The list of additional rules to be added to this ingress record. Evaluated as a template
+ ## Useful when looking for additional customization, such as using different backend
+ ##
+ extraRules: []
+
+ ## Prometheus Server ingress parameters
+ ## ref: http://kubernetes.io/docs/user-guide/ingress/
+ ##
+ ingress:
+ ## @param server.ingress.enabled Enable ingress record generation for Prometheus
+ ##
+ enabled: false
+ ## @param server.ingress.pathType Ingress path type
+ ##
+ pathType: ImplementationSpecific
+ ## @param server.ingress.hostname Default host for the ingress record
+ ##
+ hostname: server.prometheus.local
+ ## @param server.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
+ ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+ ##
+ ingressClassName: ""
+ ## @param server.ingress.path Default path for the ingress record
+ ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
+ ##
+ path: /
+ ## @param server.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+ ## Use this parameter to set the required annotations for cert-manager, see
+ ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+ ## e.g:
+ ## annotations:
+ ## kubernetes.io/ingress.class: nginx
+ ## cert-manager.io/cluster-issuer: cluster-issuer-name
+ ##
+ annotations: {}
+ ## @param server.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
+ ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
+ ## You can:
+ ## - Use the `ingress.secrets` parameter to create this TLS secret
+ ## - Rely on cert-manager to create it by setting the corresponding annotations
+ ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+ ##
+ tls: false
+ ## @param server.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+ ##
+ selfSigned: false
+ ## @param server.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
+ ## e.g:
+ ## extraHosts:
+ ## - name: prometheus.local
+ ## path: /
+ ##
+ extraHosts: []
+ ## @param server.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
+ ## e.g:
+ ## extraPaths:
+ ## - path: /*
+ ## backend:
+ ## serviceName: ssl-redirect
+ ## servicePort: use-annotation
+ ##
+ extraPaths: []
+ ## @param server.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+ ## e.g:
+ ## extraTls:
+ ## - hosts:
+ ## - prometheus.local
+ ## secretName: prometheus.local-tls
+ ##
+ extraTls: []
+ ## @param server.ingress.secrets Custom TLS certificates as secrets
+ ## NOTE: 'key' and 'certificate' are expected in PEM format
+ ## NOTE: 'name' should line up with a 'secretName' set further up
+ ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
+ ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
+ ## It is also possible to create and manage the certificates outside of this helm chart
+ ## Please see README.md for more information
+ ## e.g:
+ ## secrets:
+ ## - name: prometheus.local-tls
+ ## key: |-
+ ## -----BEGIN RSA PRIVATE KEY-----
+ ## ...
+ ## -----END RSA PRIVATE KEY-----
+ ## certificate: |-
+ ## -----BEGIN CERTIFICATE-----
+ ## ...
+ ## -----END CERTIFICATE-----
+ ##
+ secrets: []
+ ## @param server.ingress.extraRules Additional rules to be covered with this ingress record
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
+ ## e.g:
+ ## extraRules:
+ ## - host: example.local
+ ## http:
+ ## path: /
+ ## backend:
+ ## service:
+ ## name: example-svc
+ ## port:
+ ## name: http
+ ##
+ extraRules: []
+
+ ## ServiceAccount configuration
+ ##
+ serviceAccount:
+ ## @param server.serviceAccount.create Specifies whether a ServiceAccount should be created
+ ##
+ create: true
+ ## @param server.serviceAccount.name The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ ##
+ name: ""
+ ## @param server.serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
+ ##
+ annotations: {}
+ ## @param server.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
+ ##
+ automountServiceAccountToken: true
+
+ ## Prometheus service parameters
+ ##
+ service:
+ ## @param server.service.type Prometheus service type
+ ##
+ type: LoadBalancer
+ ## @param server.service.ports.http Prometheus service HTTP port
+ ##
+ ports:
+ http: 80
+ ## Node ports to expose
+ ## @param server.service.nodePorts.http Node port for HTTP
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ http: ""
+ ## @param server.service.clusterIP Prometheus service Cluster IP
+ ## e.g.:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param server.service.loadBalancerIP Prometheus service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+ ##
+ loadBalancerIP: ""
+ ## @param server.service.loadBalancerSourceRanges Prometheus service Load Balancer sources
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param server.service.externalTrafficPolicy Prometheus service external traffic policy
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param server.service.annotations Additional custom annotations for Prometheus service
+ ##
+ annotations: {}
+ ## @param server.service.extraPorts Extra ports to expose in Prometheus service (normally used with the `sidecars` value)
+ ##
+ extraPorts: []
+ ## @param server.service.sessionAffinity Control where client requests go, to the same pod or round-robin. ClientIP by default.
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/user-guide/services/
+ ##
+ sessionAffinity: ClientIP
+ ## @param server.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+
+ ## Persistence Parameters
+ ##
+
+ ## Enable persistence using Persistent Volume Claims
+ ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ persistence:
+ ## @param server.persistence.enabled Enable persistence using Persistent Volume Claims. If you have multiple instances (server.replicaCount > 1), please consider using an external storage service like Thanos or Grafana Mimir
+ ##
+ enabled: false
+ ## @param server.persistence.mountPath Path to mount the volume at.
+ ##
+ mountPath: /bitnami/prometheus/data
+ ## @param server.persistence.subPath The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services
+ ##
+ subPath: ""
+ ## @param server.persistence.storageClass Storage class of backing PVC
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param server.persistence.annotations Persistent Volume Claim annotations
+ ##
+ annotations: {}
+ ## @param server.persistence.accessModes Persistent Volume Access Modes
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param server.persistence.size Size of data volume
+ ##
+ size: 8Gi
+ ## @param server.persistence.existingClaim The name of an existing PVC to use for persistence
+ ##
+ existingClaim: ""
+ ## @param server.persistence.selector Selector to match an existing Persistent Volume for Prometheus data PVC
+ ## If set, the PVC can't have a PV dynamically provisioned for it
+ ## E.g.
+ ## selector:
+ ## matchLabels:
+ ## app: my-app
+ ##
+ selector: {}
+ ## @param server.persistence.dataSource Custom PVC data source
+ ##
+ dataSource: {}
+
+ ## RBAC configuration
+ ##
+ rbac:
+ ## @param server.rbac.create Specifies whether RBAC resources should be created
+ ##
+ create: true
+ ## @param server.rbac.rules Custom RBAC rules to set
+ ## e.g:
+ ## rules:
+ ## - apiGroups:
+ ## - ""
+ ## resources:
+ ## - pods
+ ## verbs:
+ ## - get
+ ## - list
+ ##
+ rules: []
+
+## @section Init Container Parameters
+##
+
+## 'volumePermissions' init container parameters
+## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
+## based on the *podSecurityContext/*containerSecurityContext parameters
+##
+volumePermissions:
+ ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
+ ##
+ enabled: false
+ ## Bitnami Shell image
+ ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/
+ ## @param volumePermissions.image.registry Bitnami Shell image registry
+ ## @param volumePermissions.image.repository Bitnami Shell image repository
+ ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended)
+ ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy
+ ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: 11-debian-11-r99
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## Init container's resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ## @param volumePermissions.resources.limits The resources limits for the init container
+ ## @param volumePermissions.resources.requests The requested resources for the init container
+ ##
+ resources:
+ limits: {}
+ requests: {}
+ ## Init container Container Security Context
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser
+ ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
+ ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+ ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
+ ##
+ containerSecurityContext:
+ runAsUser: 0
\ No newline at end of file