mirror of
https://github.com/bitnami/charts.git
synced 2026-04-01 06:47:23 +08:00
[bitnami/prometheus] Add chart (#15543)
* [bitnami/prometheus] Add chart --------- Signed-off-by: Fran Mulero <fmulero@vmware.com> Signed-off-by: Bitnami Containers <bitnami-bot@vmware.com> Co-authored-by: Bitnami Containers <bitnami-bot@vmware.com>
This commit is contained in:
1
.github/workflows/cd-pipeline.yml
vendored
1
.github/workflows/cd-pipeline.yml
vendored
@@ -87,6 +87,7 @@ on: # rebuild any PRs and main branch changes
|
||||
- 'bitnami/postgresql/**'
|
||||
- 'bitnami/postgresql-ha/**'
|
||||
- 'bitnami/prestashop/**'
|
||||
- 'bitnami/prometheus/**'
|
||||
- 'bitnami/pytorch/**'
|
||||
- 'bitnami/rabbitmq-cluster-operator/**'
|
||||
- 'bitnami/rabbitmq/**'
|
||||
|
||||
18
.vib/prometheus/cypress/cypress.env.json
Normal file
18
.vib/prometheus/cypress/cypress.env.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"deployments": {
|
||||
"alertmanager": {
|
||||
"query": "alertmanager_alerts"
|
||||
},
|
||||
"prometheus": {
|
||||
"query": "prometheus_http_requests_total"
|
||||
}
|
||||
},
|
||||
"targets": {
|
||||
"alertmanager": {
|
||||
"replicaCount": 2
|
||||
},
|
||||
"prometheus": {
|
||||
"replicaCount": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
4
.vib/prometheus/cypress/cypress.json
Normal file
4
.vib/prometheus/cypress/cypress.json
Normal file
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"baseUrl": "http://localhost:8080",
|
||||
"defaultCommandTimeout": 30000
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
/// <reference types="cypress" />
|
||||
|
||||
it('allows executing a query and displaying response data for each deployment', () => {
|
||||
const deployments = Cypress.env('deployments');
|
||||
|
||||
cy.visit(`/graph`);
|
||||
Object.keys(deployments).forEach((podName, i) => {
|
||||
const query = Object.values(deployments)[i].query;
|
||||
|
||||
cy.get('[role="textbox"]').clear({force: true}).type(`${query}{enter}`,{delay: 100});
|
||||
cy.contains('Execute').click();
|
||||
cy.contains('.data-table', `container="${podName}"`)
|
||||
})
|
||||
});
|
||||
|
||||
it('checks targets status', () => {
|
||||
const targets = Cypress.env('targets');
|
||||
|
||||
Object.keys(targets).forEach((podName, i) => {
|
||||
const podData = Object.values(targets)[i];
|
||||
|
||||
cy.visit(`/targets?search=${podName}`);
|
||||
cy.contains(`${podData.replicaCount}/${podData.replicaCount} up`);
|
||||
})
|
||||
});
|
||||
74
.vib/prometheus/goss/goss.yaml
Normal file
74
.vib/prometheus/goss/goss.yaml
Normal file
@@ -0,0 +1,74 @@
|
||||
command:
|
||||
check-no-capabilities:
|
||||
exec: cat /proc/1/status
|
||||
exit-status: 0
|
||||
stdout:
|
||||
- "CapInh: 0000000000000000"
|
||||
- "CapPrm: 0000000000000000"
|
||||
- "CapEff: 0000000000000000"
|
||||
- "CapBnd: 0000000000000000"
|
||||
- "CapAmb: 0000000000000000"
|
||||
{{- $uid := .Vars.server.podSecurityContext.runAsUser }}
|
||||
{{- $gid := .Vars.server.podSecurityContext.fsGroup }}
|
||||
check-user-info:
|
||||
# The UID and GID should always be either the one specified as vars (always a bigger number that the default)
|
||||
# or the one randomly defined by openshift (larger values). Otherwise, the chart is still using the default value.
|
||||
exec: if [ $(id -u) -lt {{ $uid }} ] || [ $(id -G | awk '{print $2}') -lt {{ $gid }} ]; then exit 1; fi
|
||||
exit-status: 0
|
||||
{{ if .Vars.server.serviceAccount.automountServiceAccountToken }}
|
||||
check-sa:
|
||||
exec: cat /var/run/secrets/kubernetes.io/serviceaccount/token | cut -d '.' -f 2 | xargs -I '{}' echo '{}====' | fold -w 4 | sed '$ d' | tr -d '\n' | base64 -d
|
||||
exit-status: 0
|
||||
stdout:
|
||||
- /serviceaccount.*name.*{{.Env.BITNAMI_APP_NAME }}/
|
||||
{{ end }}
|
||||
file:
|
||||
/opt/bitnami/prometheus/conf/{{ .Vars.server.existingConfigmapKey }}:
|
||||
exists: true
|
||||
contains:
|
||||
- "job_name: alertmanager"
|
||||
- "{{ (first .Vars.server.extraScrapeConfigs).job_name }}"
|
||||
/opt/bitnami/prometheus/conf/rules.yaml:
|
||||
exists: true
|
||||
contains:
|
||||
- "{{ (first .Vars.server.alertingRules.groups).name }}"
|
||||
{{.Vars.server.persistence.mountPath}}:
|
||||
exists: true
|
||||
filetype: directory
|
||||
mode: "2775"
|
||||
owner: root
|
||||
/proc/1/cmdline:
|
||||
exists: true
|
||||
contains:
|
||||
- "--enable-feature={{ .Vars.server.enableFeatures | first }}"
|
||||
http:
|
||||
http://localhost:{{ .Vars.server.containerPorts.http }}/-/ready:
|
||||
status: 200
|
||||
body:
|
||||
- "Prometheus Server is Ready."
|
||||
http://localhost:{{ .Vars.server.containerPorts.http }}/-/healthy:
|
||||
status: 200
|
||||
body:
|
||||
- "Prometheus Server is Healthy."
|
||||
{{- if .Vars.alertmanager.enabled }}
|
||||
http://prometheus-alertmanager:{{ .Vars.alertmanager.service.ports.http }}/-/healthy:
|
||||
status: 200
|
||||
body:
|
||||
- "OK"
|
||||
http://localhost:{{ .Vars.server.containerPorts.http }}/api/v1/rules?name={{ (first .Vars.server.alertingRules.groups).name }}:
|
||||
status: 200
|
||||
body:
|
||||
- "{{ (first (first .Vars.server.alertingRules.groups).rules).annotations.summary }}"
|
||||
{{- end }}
|
||||
command:
|
||||
check-config-files:
|
||||
exec: promtool check config /opt/bitnami/prometheus/conf/{{ .Vars.server.existingConfigmapKey }}
|
||||
exit-status: 0
|
||||
stdout:
|
||||
- SUCCESS
|
||||
check-metrics:
|
||||
exec: promtool query instant http://localhost:{{ .Vars.server.containerPorts.http }} prometheus_http_requests_total
|
||||
exit-status: 0
|
||||
stdout:
|
||||
- "/-/healthy"
|
||||
- "/-/ready"
|
||||
68
.vib/prometheus/runtime-parameters.yaml
Normal file
68
.vib/prometheus/runtime-parameters.yaml
Normal file
@@ -0,0 +1,68 @@
|
||||
volumePermissions:
|
||||
enabled: true
|
||||
server:
|
||||
replicaCount: 1
|
||||
serviceAccount:
|
||||
create: true
|
||||
automountServiceAccountToken: true
|
||||
podSecurityContext:
|
||||
enabled: true
|
||||
runAsUser: 1002
|
||||
fsGroup: 1002
|
||||
containerSecurityContext:
|
||||
enabled: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsNonRoot: true
|
||||
readOnlyRootFilesystem: false
|
||||
runAsUser: 1002
|
||||
rbac:
|
||||
create: true
|
||||
persistence:
|
||||
enabled: true
|
||||
mountPath: /opt/bitnami/prometheus/data
|
||||
service:
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
http: 80
|
||||
enableFeatures: [ "memory-snapshot-on-shutdown" ]
|
||||
containerPorts:
|
||||
http: 8080
|
||||
existingConfigmapKey: test.yaml
|
||||
extraScrapeConfigs:
|
||||
- job_name: wordpress
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
metrics_path: /metrics
|
||||
relabel_configs:
|
||||
- source_labels:
|
||||
- job
|
||||
target_label: __tmp_wordpress_job_name
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_service_label_app_kubernetes_io_instance
|
||||
- __meta_kubernetes_service_labelpresent_app_kubernetes_io_instance
|
||||
regex: (wordpress);true
|
||||
alertingRules:
|
||||
groups:
|
||||
- name: example
|
||||
rules:
|
||||
- alert: Example
|
||||
expr: count(prometheus_http_requests_total) > 2
|
||||
for: 10m
|
||||
labels:
|
||||
severity: page
|
||||
annotations:
|
||||
summary: High number of requests to prometheus
|
||||
alertmanager:
|
||||
enabled: true
|
||||
replicaCount: 2
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
service:
|
||||
ports:
|
||||
http: 9095
|
||||
85
.vib/prometheus/vib-publish.json
Normal file
85
.vib/prometheus/vib-publish.json
Normal file
@@ -0,0 +1,85 @@
|
||||
{
|
||||
"phases": {
|
||||
"package": {
|
||||
"context": {
|
||||
"resources": {
|
||||
"url": "{SHA_ARCHIVE}",
|
||||
"path": "/bitnami/prometheus"
|
||||
}
|
||||
},
|
||||
"actions": [
|
||||
{
|
||||
"action_id": "helm-package"
|
||||
},
|
||||
{
|
||||
"action_id": "helm-lint"
|
||||
}
|
||||
]
|
||||
},
|
||||
"verify": {
|
||||
"context": {
|
||||
"resources": {
|
||||
"url": "{SHA_ARCHIVE}",
|
||||
"path": "/bitnami/prometheus"
|
||||
},
|
||||
"target_platform": {
|
||||
"target_platform_id": "{VIB_ENV_TARGET_PLATFORM}",
|
||||
"size": {
|
||||
"name": "S4"
|
||||
}
|
||||
}
|
||||
},
|
||||
"actions": [
|
||||
{
|
||||
"action_id": "health-check",
|
||||
"params": {
|
||||
"endpoint": "lb-prometheus-server-http",
|
||||
"app_protocol": "HTTP"
|
||||
}
|
||||
},
|
||||
{
|
||||
"action_id": "cypress",
|
||||
"params": {
|
||||
"resources": {
|
||||
"path": "/.vib/prometheus/cypress"
|
||||
},
|
||||
"endpoint": "lb-prometheus-server-http",
|
||||
"app_protocol": "HTTP"
|
||||
}
|
||||
},
|
||||
{
|
||||
"action_id": "goss",
|
||||
"params": {
|
||||
"resources": {
|
||||
"path": "/.vib"
|
||||
},
|
||||
"tests_file": "prometheus/goss/goss.yaml",
|
||||
"vars_file": "prometheus/runtime-parameters.yaml",
|
||||
"remote": {
|
||||
"pod": {
|
||||
"workload": "deploy-prometheus-server"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"publish": {
|
||||
"actions": [
|
||||
{
|
||||
"action_id": "helm-publish",
|
||||
"params": {
|
||||
"repository": {
|
||||
"kind": "S3",
|
||||
"url": "{VIB_ENV_S3_URL}",
|
||||
"authn": {
|
||||
"access_key_id": "{VIB_ENV_S3_ACCESS_KEY_ID}",
|
||||
"secret_access_key": "{VIB_ENV_S3_SECRET_ACCESS_KEY}"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
68
.vib/prometheus/vib-verify.json
Normal file
68
.vib/prometheus/vib-verify.json
Normal file
@@ -0,0 +1,68 @@
|
||||
{
|
||||
"phases": {
|
||||
"package": {
|
||||
"context": {
|
||||
"resources": {
|
||||
"url": "{SHA_ARCHIVE}",
|
||||
"path": "/bitnami/prometheus"
|
||||
}
|
||||
},
|
||||
"actions": [
|
||||
{
|
||||
"action_id": "helm-package"
|
||||
},
|
||||
{
|
||||
"action_id": "helm-lint"
|
||||
}
|
||||
]
|
||||
},
|
||||
"verify": {
|
||||
"context": {
|
||||
"resources": {
|
||||
"url": "{SHA_ARCHIVE}",
|
||||
"path": "/bitnami/prometheus"
|
||||
},
|
||||
"target_platform": {
|
||||
"target_platform_id": "{VIB_ENV_TARGET_PLATFORM}",
|
||||
"size": {
|
||||
"name": "S4"
|
||||
}
|
||||
}
|
||||
},
|
||||
"actions": [
|
||||
{
|
||||
"action_id": "health-check",
|
||||
"params": {
|
||||
"endpoint": "lb-prometheus-server-http",
|
||||
"app_protocol": "HTTP"
|
||||
}
|
||||
},
|
||||
{
|
||||
"action_id": "cypress",
|
||||
"params": {
|
||||
"resources": {
|
||||
"path": "/.vib/prometheus/cypress"
|
||||
},
|
||||
"endpoint": "lb-prometheus-server-http",
|
||||
"app_protocol": "HTTP"
|
||||
}
|
||||
},
|
||||
{
|
||||
"action_id": "goss",
|
||||
"params": {
|
||||
"resources": {
|
||||
"path": "/.vib"
|
||||
},
|
||||
"tests_file": "prometheus/goss/goss.yaml",
|
||||
"vars_file": "prometheus/runtime-parameters.yaml",
|
||||
"remote": {
|
||||
"pod": {
|
||||
"workload": "deploy-prometheus-server"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
6
bitnami/prometheus/Chart.lock
Normal file
6
bitnami/prometheus/Chart.lock
Normal file
@@ -0,0 +1,6 @@
|
||||
dependencies:
|
||||
- name: common
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
version: 2.2.5
|
||||
digest: sha256:318f438acfeaced11d9060877d615caf1985417d2865810defaa886d3496f8d3
|
||||
generated: "2023-05-04T15:49:37.865565+02:00"
|
||||
26
bitnami/prometheus/Chart.yaml
Normal file
26
bitnami/prometheus/Chart.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
annotations:
|
||||
category: Analytics
|
||||
licenses: Apache-2.0
|
||||
apiVersion: v2
|
||||
appVersion: 2.42.0
|
||||
dependencies:
|
||||
- name: common
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
tags:
|
||||
- bitnami-common
|
||||
version: 2.x.x
|
||||
description: Prometheus is an open source monitoring and alerting system. It enables sysadmins to monitor their infrastructures by collecting metrics from configured targets at given intervals.
|
||||
home: https://github.com/prometheus/prometheus
|
||||
icon: https://bitnami.com/assets/stacks/prometheus/img/prometheus-stack-220x234.png
|
||||
keywords:
|
||||
- prometheus
|
||||
- monitoring
|
||||
maintainers:
|
||||
- name: Bitnami
|
||||
url: https://github.com/bitnami/charts
|
||||
name: prometheus
|
||||
sources:
|
||||
- https://github.com/bitnami/containers/tree/main/bitnami/prometheus
|
||||
- https://github.com/prometheus/prometheus
|
||||
- https://github.com/prometheus-community/helm-charts
|
||||
version: 0.1.0
|
||||
675
bitnami/prometheus/README.md
Normal file
675
bitnami/prometheus/README.md
Normal file
@@ -0,0 +1,675 @@
|
||||
<!--- app-name: Prometheus -->
|
||||
|
||||
# Prometheus packaged by Bitnami
|
||||
|
||||
Prometheus is an open source monitoring and alerting system. It enables sysadmins to monitor their infrastructures by collecting metrics from configured targets at given intervals.
|
||||
|
||||
[Overview of Prometheus](https://prometheus.io/)
|
||||
|
||||
## TL;DR
|
||||
|
||||
```console
|
||||
helm install my-release oci://registry-1.docker.io/bitnamicharts/prometheus
|
||||
```
|
||||
|
||||
## Introduction
|
||||
|
||||
Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads.
|
||||
|
||||
This chart bootstraps a [Prometheus](https://prometheus.io) Deployment in a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
|
||||
|
||||
[Learn more about the default configuration of the chart](https://docs.bitnami.com/kubernetes/infrastructure/prometheus/get-started/).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.19+
|
||||
- Helm 3.2.0+
|
||||
- PV provisioner support in the underlying infrastructure
|
||||
- ReadWriteMany volumes for deployment scaling
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```console
|
||||
helm install my-release oci://registry-1.docker.io/bitnamicharts/prometheus
|
||||
```
|
||||
|
||||
The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
|
||||
|
||||
> **Tip**: List all releases using `helm list`
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```console
|
||||
helm delete my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Parameters
|
||||
|
||||
### Global parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------------- | ----------------------------------------------- | ----- |
|
||||
| `global.imageRegistry` | Global Docker image registry | `""` |
|
||||
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
|
||||
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------------ | --------------------------------------------------------------------------------------- | --------------- |
|
||||
| `kubeVersion` | Override Kubernetes version | `""` |
|
||||
| `nameOverride` | String to partially override common.names.name | `""` |
|
||||
| `fullnameOverride` | String to fully override common.names.fullname | `""` |
|
||||
| `namespaceOverride` | String to fully override common.names.namespace | `""` |
|
||||
| `commonLabels` | Labels to add to all deployed objects | `{}` |
|
||||
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
|
||||
| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` |
|
||||
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
|
||||
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
|
||||
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
|
||||
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
|
||||
| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
|
||||
|
||||
### Alertmanager Parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------- |
|
||||
| `alertmanager.enabled` | Alertmanager enabled | `true` |
|
||||
| `alertmanager.image.registry` | Alertmanager image registry | `docker.io` |
|
||||
| `alertmanager.image.repository` | Alertmanager image repository | `bitnami/alertmanager` |
|
||||
| `alertmanager.image.tag` | Alertmanager image tag (immutable tags are recommended) | `0.25.0-debian-11-r48` |
|
||||
| `alertmanager.image.digest` | Alertmanager image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` |
|
||||
| `alertmanager.image.pullPolicy` | Alertmanager image pull policy | `IfNotPresent` |
|
||||
| `alertmanager.image.pullSecrets` | Alertmanager image pull secrets | `[]` |
|
||||
| `alertmanager.configuration` | Alertmanager configuration. This content will be stored in the the alertmanager.yaml file and the content can be a template. | `""` |
|
||||
| `alertmanager.replicaCount` | Number of Alertmanager replicas to deploy | `1` |
|
||||
| `alertmanager.containerPorts.http` | Alertmanager HTTP container port | `9093` |
|
||||
| `alertmanager.containerPorts.cluster` | Alertmanager Cluster HA port | `9094` |
|
||||
| `alertmanager.livenessProbe.enabled` | Enable livenessProbe on Alertmanager containers | `true` |
|
||||
| `alertmanager.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
|
||||
| `alertmanager.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` |
|
||||
| `alertmanager.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `3` |
|
||||
| `alertmanager.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
|
||||
| `alertmanager.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
|
||||
| `alertmanager.readinessProbe.enabled` | Enable readinessProbe on Alertmanager containers | `true` |
|
||||
| `alertmanager.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
|
||||
| `alertmanager.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
|
||||
| `alertmanager.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `2` |
|
||||
| `alertmanager.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
|
||||
| `alertmanager.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
|
||||
| `alertmanager.startupProbe.enabled` | Enable startupProbe on Alertmanager containers | `false` |
|
||||
| `alertmanager.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `2` |
|
||||
| `alertmanager.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
|
||||
| `alertmanager.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `2` |
|
||||
| `alertmanager.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` |
|
||||
| `alertmanager.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
|
||||
| `alertmanager.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
|
||||
| `alertmanager.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
|
||||
| `alertmanager.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
|
||||
| `alertmanager.resources.limits` | The resources limits for the Alertmanager containers | `{}` |
|
||||
| `alertmanager.resources.requests` | The requested resources for the Alertmanager containers | `{}` |
|
||||
| `alertmanager.podSecurityContext.enabled` | Enabled Alertmanager pods' Security Context | `true` |
|
||||
| `alertmanager.podSecurityContext.fsGroup` | Set Alertmanager pod's Security Context fsGroup | `1001` |
|
||||
| `alertmanager.containerSecurityContext.enabled` | Enabled Alertmanager containers' Security Context | `true` |
|
||||
| `alertmanager.containerSecurityContext.runAsUser` | Set Alertmanager containers' Security Context runAsUser | `1001` |
|
||||
| `alertmanager.containerSecurityContext.runAsNonRoot` | Set Alertmanager containers' Security Context runAsNonRoot | `true` |
|
||||
| `alertmanager.containerSecurityContext.readOnlyRootFilesystem` | Set Alertmanager containers' Security Context runAsNonRoot | `false` |
|
||||
| `alertmanager.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Alertmanager | `""` |
|
||||
| `alertmanager.existingConfigmapKey` | The name of the key with the Alertmanager config file | `""` |
|
||||
| `alertmanager.command` | Override default container command (useful when using custom images) | `[]` |
|
||||
| `alertmanager.args` | Override default container args (useful when using custom images) | `[]` |
|
||||
| `alertmanager.extraArgs` | Additional arguments passed to the Prometheus server container | `[]` |
|
||||
| `alertmanager.hostAliases` | Alertmanager pods host aliases | `[]` |
|
||||
| `alertmanager.podLabels` | Extra labels for Alertmanager pods | `{}` |
|
||||
| `alertmanager.podAnnotations` | Annotations for Alertmanager pods | `{}` |
|
||||
| `alertmanager.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
|
||||
| `alertmanager.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
|
||||
| `alertmanager.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
|
||||
| `alertmanager.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` |
|
||||
| `alertmanager.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
|
||||
| `alertmanager.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
|
||||
| `alertmanager.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` |
|
||||
| `alertmanager.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` |
|
||||
| `alertmanager.affinity` | Affinity for Alertmanager pods assignment | `{}` |
|
||||
| `alertmanager.nodeSelector` | Node labels for Alertmanager pods assignment | `{}` |
|
||||
| `alertmanager.tolerations` | Tolerations for Alertmanager pods assignment | `[]` |
|
||||
| `alertmanager.updateStrategy.type` | Alertmanager statefulset strategy type | `RollingUpdate` |
|
||||
| `alertmanager.podManagementPolicy` | Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join | `OrderedReady` |
|
||||
| `alertmanager.priorityClassName` | Alertmanager pods' priorityClassName | `""` |
|
||||
| `alertmanager.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
|
||||
| `alertmanager.schedulerName` | Name of the k8s scheduler (other than default) for Alertmanager pods | `""` |
|
||||
| `alertmanager.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` |
|
||||
| `alertmanager.lifecycleHooks` | for the Alertmanager container(s) to automate configuration before or after startup | `{}` |
|
||||
| `alertmanager.extraEnvVars` | Array with extra environment variables to add to Alertmanager nodes | `[]` |
|
||||
| `alertmanager.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Alertmanager nodes | `""` |
|
||||
| `alertmanager.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Alertmanager nodes | `""` |
|
||||
| `alertmanager.extraVolumes` | Optionally specify extra list of additional volumes for the Alertmanager pod(s) | `[]` |
|
||||
| `alertmanager.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Alertmanager container(s) | `[]` |
|
||||
| `alertmanager.sidecars` | Add additional sidecar containers to the Alertmanager pod(s) | `[]` |
|
||||
| `alertmanager.initContainers` | Add additional init containers to the Alertmanager pod(s) | `[]` |
|
||||
| `alertmanager.ingress.enabled` | Enable ingress record generation for Alertmanager | `false` |
|
||||
| `alertmanager.ingress.pathType` | Ingress path type | `ImplementationSpecific` |
|
||||
| `alertmanager.ingress.hostname` | Default host for the ingress record | `alertmanager.prometheus.local` |
|
||||
| `alertmanager.ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` |
|
||||
| `alertmanager.ingress.path` | Default path for the ingress record | `/` |
|
||||
| `alertmanager.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
|
||||
| `alertmanager.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
|
||||
| `alertmanager.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
|
||||
| `alertmanager.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
|
||||
| `alertmanager.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
|
||||
| `alertmanager.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
|
||||
| `alertmanager.ingress.secrets` | Custom TLS certificates as secrets | `[]` |
|
||||
| `alertmanager.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
|
||||
| `alertmanager.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
||||
| `alertmanager.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
||||
| `alertmanager.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` |
|
||||
| `alertmanager.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
|
||||
| `alertmanager.service.type` | Alertmanager service type | `LoadBalancer` |
|
||||
| `alertmanager.service.ports.http` | Alertmanager service HTTP port | `80` |
|
||||
| `alertmanager.service.ports.cluster` | Alertmanager cluster HA port | `9094` |
|
||||
| `alertmanager.service.nodePorts.http` | Node port for HTTP | `""` |
|
||||
| `alertmanager.service.clusterIP` | Alertmanager service Cluster IP | `""` |
|
||||
| `alertmanager.service.loadBalancerIP` | Alertmanager service Load Balancer IP | `""` |
|
||||
| `alertmanager.service.loadBalancerSourceRanges` | Alertmanager service Load Balancer sources | `[]` |
|
||||
| `alertmanager.service.externalTrafficPolicy` | Alertmanager service external traffic policy | `Cluster` |
|
||||
| `alertmanager.service.annotations` | Additional custom annotations for Alertmanager service | `{}` |
|
||||
| `alertmanager.service.extraPorts` | Extra ports to expose in Alertmanager service (normally used with the `sidecars` value) | `[]` |
|
||||
| `alertmanager.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
|
||||
| `alertmanager.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
|
||||
| `alertmanager.persistence.enabled` | Enable Alertmanager data persistence using VolumeClaimTemplates | `false` |
|
||||
| `alertmanager.persistence.mountPath` | Path to mount the volume at. | `/bitnami/alertmanager/data` |
|
||||
| `alertmanager.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services | `""` |
|
||||
| `alertmanager.persistence.storageClass` | PVC Storage Class for Concourse worker data volume | `""` |
|
||||
| `alertmanager.persistence.accessModes` | PVC Access Mode for Concourse worker volume | `["ReadWriteOnce"]` |
|
||||
| `alertmanager.persistence.size` | PVC Storage Request for Concourse worker volume | `8Gi` |
|
||||
| `alertmanager.persistence.annotations` | Annotations for the PVC | `{}` |
|
||||
| `alertmanager.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
|
||||
|
||||
### Prometheus server Parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ----------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
|
||||
| `server.image.registry` | Prometheus image registry | `docker.io` |
|
||||
| `server.image.repository` | Prometheus image repository | `bitnami/prometheus` |
|
||||
| `server.image.tag` | Prometheus image tag (immutable tags are recommended) | `2.44.0-debian-11-r0` |
|
||||
| `server.image.digest` | Prometheus image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` |
|
||||
| `server.image.pullPolicy` | Prometheus image pull policy | `IfNotPresent` |
|
||||
| `server.image.pullSecrets` | Prometheus image pull secrets | `[]` |
|
||||
| `server.configuration` | Promethus configuration. This content will be stored in the the prometheus.yaml file and the content can be a template. | `""` |
|
||||
| `server.alertingRules` | Prometheus alerting rules. This content will be stored in the the rules.yaml file and the content can be a template. | `{}` |
|
||||
| `server.extraScrapeConfigs` | Promethus configuration, useful to declare new scrape_configs. This content will be merged with the 'server.configuration' value and stored in the the prometheus.yaml file. | `[]` |
|
||||
| `server.replicaCount` | Number of Prometheus replicas to deploy | `1` |
|
||||
| `server.containerPorts.http` | Prometheus HTTP container port | `9090` |
|
||||
| `server.livenessProbe.enabled` | Enable livenessProbe on Prometheus containers | `true` |
|
||||
| `server.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
|
||||
| `server.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` |
|
||||
| `server.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `3` |
|
||||
| `server.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
|
||||
| `server.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
|
||||
| `server.readinessProbe.enabled` | Enable readinessProbe on Prometheus containers | `true` |
|
||||
| `server.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
|
||||
| `server.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
|
||||
| `server.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `2` |
|
||||
| `server.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
|
||||
| `server.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
|
||||
| `server.startupProbe.enabled` | Enable startupProbe on Prometheus containers | `false` |
|
||||
| `server.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `2` |
|
||||
| `server.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
|
||||
| `server.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `2` |
|
||||
| `server.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` |
|
||||
| `server.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
|
||||
| `server.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
|
||||
| `server.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
|
||||
| `server.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
|
||||
| `server.resources.limits` | The resources limits for the Prometheus containers | `{}` |
|
||||
| `server.resources.requests` | The requested resources for the Prometheus containers | `{}` |
|
||||
| `server.podSecurityContext.enabled` | Enabled Prometheus pods' Security Context | `true` |
|
||||
| `server.podSecurityContext.fsGroup` | Set Prometheus pod's Security Context fsGroup | `1001` |
|
||||
| `server.containerSecurityContext.enabled` | Enabled Prometheus containers' Security Context | `true` |
|
||||
| `server.containerSecurityContext.runAsUser` | Set Prometheus containers' Security Context runAsUser | `1001` |
|
||||
| `server.containerSecurityContext.runAsNonRoot` | Set Prometheus containers' Security Context runAsNonRoot | `true` |
|
||||
| `server.containerSecurityContext.readOnlyRootFilesystem` | Set Prometheus containers' Security Context readOnlyRootFilesystem | `false` |
|
||||
| `server.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Prometheus | `""` |
|
||||
| `server.existingConfigmapKey` | The name of the key with the Prometheus config file | `""` |
|
||||
| `server.command` | Override default container command (useful when using custom images) | `[]` |
|
||||
| `server.args` | Override default container args (useful when using custom images) | `[]` |
|
||||
| `server.extraArgs` | Additional arguments passed to the Prometheus server container | `[]` |
|
||||
| `server.hostAliases` | Prometheus pods host aliases | `[]` |
|
||||
| `server.podLabels` | Extra labels for Prometheus pods | `{}` |
|
||||
| `server.podAnnotations` | Annotations for Prometheus pods | `{}` |
|
||||
| `server.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
|
||||
| `server.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
|
||||
| `server.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
|
||||
| `server.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` |
|
||||
| `server.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
|
||||
| `server.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
|
||||
| `server.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` |
|
||||
| `server.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` |
|
||||
| `server.affinity` | Affinity for Prometheus pods assignment | `{}` |
|
||||
| `server.nodeSelector` | Node labels for Prometheus pods assignment | `{}` |
|
||||
| `server.tolerations` | Tolerations for Prometheus pods assignment | `[]` |
|
||||
| `server.updateStrategy.type` | Prometheus deployment strategy type. If persistence is enabled, strategy type should be set to Recreate to avoid dead locks. | `RollingUpdate` |
|
||||
| `server.priorityClassName` | Prometheus pods' priorityClassName | `""` |
|
||||
| `server.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
|
||||
| `server.schedulerName` | Name of the k8s scheduler (other than default) for Prometheus pods | `""` |
|
||||
| `server.terminationGracePeriodSeconds` | Seconds Prometheus pod needs to terminate gracefully | `""` |
|
||||
| `server.lifecycleHooks` | for the Prometheus container(s) to automate configuration before or after startup | `{}` |
|
||||
| `server.extraEnvVars` | Array with extra environment variables to add to Prometheus nodes | `[]` |
|
||||
| `server.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Prometheus nodes | `""` |
|
||||
| `server.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Prometheus nodes | `""` |
|
||||
| `server.extraVolumes` | Optionally specify extra list of additional volumes for the Prometheus pod(s) | `[]` |
|
||||
| `server.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Prometheus container(s) | `[]` |
|
||||
| `server.sidecars` | Add additional sidecar containers to the Prometheus pod(s) | `[]` |
|
||||
| `server.initContainers` | Add additional init containers to the Prometheus pod(s) | `[]` |
|
||||
| `server.routePrefix` | Prefix for the internal routes of web endpoints | `/` |
|
||||
| `server.remoteWrite` | The remote_write spec configuration for Prometheus | `[]` |
|
||||
| `server.scrapeInterval` | Interval between consecutive scrapes. Example: "1m" | `""` |
|
||||
| `server.scrapeTimeout` | Timeout after which a scrape is considered failed. Example: "10s" | `""` |
|
||||
| `server.evaluationInterval` | Interval between consecutive evaluations. Example: "1m" | `""` |
|
||||
| `server.enableAdminAPI` | Enable Prometheus administrative API | `false` |
|
||||
| `server.enableRemoteWriteReceiver` | Enable Prometheus to be used as a receiver for the Prometheus remote write protocol. | `false` |
|
||||
| `server.enableFeatures` | Enable access to Prometheus disabled features. | `[]` |
|
||||
| `server.logLevel` | Log level for Prometheus | `info` |
|
||||
| `server.logFormat` | Log format for Prometheus | `logfmt` |
|
||||
| `server.retention` | Metrics retention days | `10d` |
|
||||
| `server.retentionSize` | Maximum size of metrics | `0` |
|
||||
| `server.alertingEndpoints` | Alertmanagers to which alerts will be sent | `[]` |
|
||||
| `server.externalLabels` | External labels to add to any time series or alerts when communicating with external systems | `{}` |
|
||||
| `server.thanos.create` | Create a Thanos sidecar container | `false` |
|
||||
| `server.thanos.image.registry` | Thanos image registry | `docker.io` |
|
||||
| `server.thanos.image.repository` | Thanos image name | `bitnami/thanos` |
|
||||
| `server.thanos.image.tag` | Thanos image tag | `0.31.0-scratch-r3` |
|
||||
| `server.thanos.image.digest` | Thanos image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `server.thanos.image.pullPolicy` | Thanos image pull policy | `IfNotPresent` |
|
||||
| `server.thanos.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
|
||||
| `server.thanos.containerSecurityContext.enabled` | Enable container security context | `true` |
|
||||
| `server.thanos.containerSecurityContext.readOnlyRootFilesystem` | mount / (root) as a readonly filesystem | `false` |
|
||||
| `server.thanos.containerSecurityContext.allowPrivilegeEscalation` | Switch privilegeEscalation possibility on or off | `false` |
|
||||
| `server.thanos.containerSecurityContext.runAsNonRoot` | Force the container to run as a non root user | `true` |
|
||||
| `server.thanos.containerSecurityContext.capabilities.drop` | Linux Kernel capabilities which should be dropped | `[]` |
|
||||
| `server.thanos.prometheusUrl` | Override default prometheus url `http://localhost:9090` | `""` |
|
||||
| `server.thanos.extraArgs` | Additional arguments passed to the thanos sidecar container | `[]` |
|
||||
| `server.thanos.objectStorageConfig.secretName` | Support mounting a Secret for the objectStorageConfig of the sideCar container. | `""` |
|
||||
| `server.thanos.objectStorageConfig.secretKey` | Secret key with the configuration file. | `thanos.yaml` |
|
||||
| `server.thanos.extraVolumeMounts` | Additional volumeMounts from `server.volumes` for thanos sidecar container | `[]` |
|
||||
| `server.thanos.resources.limits` | The resources limits for the Thanos sidecar container | `{}` |
|
||||
| `server.thanos.resources.requests` | The resources requests for the Thanos sidecar container | `{}` |
|
||||
| `server.thanos.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `server.thanos.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `0` |
|
||||
| `server.thanos.livenessProbe.periodSeconds` | How often to perform the probe | `5` |
|
||||
| `server.thanos.livenessProbe.timeoutSeconds` | When the probe times out | `3` |
|
||||
| `server.thanos.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `120` |
|
||||
| `server.thanos.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `server.thanos.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `server.thanos.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `0` |
|
||||
| `server.thanos.readinessProbe.periodSeconds` | How often to perform the probe | `5` |
|
||||
| `server.thanos.readinessProbe.timeoutSeconds` | When the probe times out | `3` |
|
||||
| `server.thanos.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `120` |
|
||||
| `server.thanos.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `server.thanos.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
|
||||
| `server.thanos.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
|
||||
| `server.thanos.service.type` | Kubernetes service type | `ClusterIP` |
|
||||
| `server.thanos.service.ports.grpc` | Thanos service port | `10901` |
|
||||
| `server.thanos.service.clusterIP` | Specific cluster IP when service type is cluster IP. Use `None` to create headless service by default. | `None` |
|
||||
| `server.thanos.service.nodePorts.grpc` | Specify the nodePort value for the LoadBalancer and NodePort service types. | `""` |
|
||||
| `server.thanos.service.loadBalancerIP` | `loadBalancerIP` if service type is `LoadBalancer` | `""` |
|
||||
| `server.thanos.service.loadBalancerSourceRanges` | Addresses that are allowed when svc is `LoadBalancer` | `[]` |
|
||||
| `server.thanos.service.annotations` | Additional annotations for the Thanos sidecar service | `{}` |
|
||||
| `server.thanos.service.extraPorts` | Additional ports to expose from the Thanos sidecar container | `[]` |
|
||||
| `server.thanos.service.externalTrafficPolicy` | Thanos sidecar service external traffic policy | `Cluster` |
|
||||
| `server.thanos.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
|
||||
| `server.thanos.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
|
||||
| `server.thanos.ingress.enabled` | Enable ingress controller resource | `false` |
|
||||
| `server.thanos.ingress.pathType` | Ingress path type | `ImplementationSpecific` |
|
||||
| `server.thanos.ingress.hostname` | Default host for the ingress record | `thanos.prometheus.local` |
|
||||
| `server.thanos.ingress.path` | Default path for the ingress record | `/` |
|
||||
| `server.thanos.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
|
||||
| `server.thanos.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
|
||||
| `server.thanos.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
|
||||
| `server.thanos.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
|
||||
| `server.thanos.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
|
||||
| `server.thanos.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
|
||||
| `server.thanos.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
|
||||
| `server.thanos.ingress.secrets` | Custom TLS certificates as secrets | `[]` |
|
||||
| `server.thanos.ingress.extraRules` | The list of additional rules to be added to this ingress record. Evaluated as a template | `[]` |
|
||||
| `server.ingress.enabled` | Enable ingress record generation for Prometheus | `false` |
|
||||
| `server.ingress.pathType` | Ingress path type | `ImplementationSpecific` |
|
||||
| `server.ingress.hostname` | Default host for the ingress record | `server.prometheus.local` |
|
||||
| `server.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
|
||||
| `server.ingress.path` | Default path for the ingress record | `/` |
|
||||
| `server.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
|
||||
| `server.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
|
||||
| `server.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
|
||||
| `server.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
|
||||
| `server.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
|
||||
| `server.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
|
||||
| `server.ingress.secrets` | Custom TLS certificates as secrets | `[]` |
|
||||
| `server.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
|
||||
| `server.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
||||
| `server.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
||||
| `server.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` |
|
||||
| `server.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
|
||||
| `server.service.type` | Prometheus service type | `LoadBalancer` |
|
||||
| `server.service.ports.http` | Prometheus service HTTP port | `80` |
|
||||
| `server.service.nodePorts.http` | Node port for HTTP | `""` |
|
||||
| `server.service.clusterIP` | Prometheus service Cluster IP | `""` |
|
||||
| `server.service.loadBalancerIP` | Prometheus service Load Balancer IP | `""` |
|
||||
| `server.service.loadBalancerSourceRanges` | Prometheus service Load Balancer sources | `[]` |
|
||||
| `server.service.externalTrafficPolicy` | Prometheus service external traffic policy | `Cluster` |
|
||||
| `server.service.annotations` | Additional custom annotations for Prometheus service | `{}` |
|
||||
| `server.service.extraPorts` | Extra ports to expose in Prometheus service (normally used with the `sidecars` value) | `[]` |
|
||||
| `server.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin. ClientIP by default. | `ClientIP` |
|
||||
| `server.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
|
||||
| `server.persistence.enabled` | Enable persistence using Persistent Volume Claims. If you have multiple instances (server.replicaCount > 1), please consider using an external storage service like Thanos or Grafana Mimir | `false` |
|
||||
| `server.persistence.mountPath` | Path to mount the volume at. | `/bitnami/prometheus/data` |
|
||||
| `server.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services | `""` |
|
||||
| `server.persistence.storageClass` | Storage class of backing PVC | `""` |
|
||||
| `server.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
|
||||
| `server.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
|
||||
| `server.persistence.size` | Size of data volume | `8Gi` |
|
||||
| `server.persistence.existingClaim` | The name of an existing PVC to use for persistence | `""` |
|
||||
| `server.persistence.selector` | Selector to match an existing Persistent Volume for Prometheus data PVC | `{}` |
|
||||
| `server.persistence.dataSource` | Custom PVC data source | `{}` |
|
||||
| `server.rbac.create` | Specifies whether RBAC resources should be created | `true` |
|
||||
| `server.rbac.rules` | Custom RBAC rules to set | `[]` |
|
||||
|
||||
### Init Container Parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------------- |
|
||||
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
|
||||
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
|
||||
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
|
||||
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r99` |
|
||||
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
|
||||
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
|
||||
| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` |
|
||||
| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` |
|
||||
| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` |
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```console
|
||||
helm install my-release --set alertmanager.enabled=true \
|
||||
oci://registry-1.docker.io/bitnamicharts/prometheus
|
||||
```
|
||||
|
||||
The above command installs the Prometheus chart with Alertmanager enabled.
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
|
||||
|
||||
```console
|
||||
helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/prometheus
|
||||
```
|
||||
|
||||
> **Tip**: You can use the default [values.yaml](values.yaml)
|
||||
|
||||
## Configuration and installation details
|
||||
|
||||
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
|
||||
|
||||
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
|
||||
|
||||
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
|
||||
|
||||
### Deploy extra resources
|
||||
|
||||
There are cases where you may want to deploy extra objects, such as a ConfigMap containing your app's configuration or some extra deployment with a micro service used by your app. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter.
|
||||
|
||||
### Setting Pod's affinity
|
||||
|
||||
This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
|
||||
|
||||
As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
|
||||
|
||||
### Integrate Prometheus and Alertmanager with Thanos
|
||||
|
||||
You can integrate Prometheus & Alertmanager with Thanos using this chart and the [Bitnami Thanos chart](https://github.com/bitnami/charts/tree/main/bitnami/thanos) following the steps below:
|
||||
|
||||
> Note: in this example we will use MinIO® (subchart) as the Objstore. Every component will be deployed in the "monitoring" namespace.
|
||||
|
||||
- Create a **values.yaml** like the one below for Thanos:
|
||||
|
||||
```yaml
|
||||
objstoreConfig: |-
|
||||
type: s3
|
||||
config:
|
||||
bucket: thanos
|
||||
endpoint: {{ include "thanos.minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:9000
|
||||
access_key: minio
|
||||
secret_key: minio123
|
||||
insecure: true
|
||||
query:
|
||||
dnsDiscovery:
|
||||
sidecarsService: prometheus-thanos
|
||||
sidecarsNamespace: monitoring
|
||||
bucketweb:
|
||||
enabled: true
|
||||
compactor:
|
||||
enabled: true
|
||||
storegateway:
|
||||
enabled: true
|
||||
ruler:
|
||||
enabled: true
|
||||
alertmanagers:
|
||||
- http://prometheus-alertmanager.monitoring.svc.cluster.local:9093
|
||||
config: |-
|
||||
groups:
|
||||
- name: "metamonitoring"
|
||||
rules:
|
||||
- alert: "PrometheusDown"
|
||||
expr: absent(up{prometheus="monitoring/prometheus"})
|
||||
metrics:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
minio:
|
||||
enabled: true
|
||||
auth:
|
||||
rootPassword: minio123
|
||||
rootUser: minio
|
||||
monitoringBuckets: thanos
|
||||
accessKey:
|
||||
password: minio
|
||||
secretKey:
|
||||
password: minio123
|
||||
```
|
||||
|
||||
- Install Prometheus and Thanos charts:
|
||||
|
||||
For Helm 3:
|
||||
|
||||
```console
|
||||
kubectl create namespace monitoring
|
||||
helm install prometheus \
|
||||
  --set server.thanos.create=true \
|
||||
--namespace monitoring \
|
||||
oci://registry-1.docker.io/bitnamicharts/prometheus
|
||||
helm install thanos \
|
||||
--values values.yaml \
|
||||
--namespace monitoring \
|
||||
oci://registry-1.docker.io/bitnamicharts/thanos
|
||||
```
|
||||
|
||||
That's all! Now you have Thanos fully integrated with Prometheus and Alertmanager.
|
||||
|
||||
### Integrate Prometheus with Grafana Mimir
|
||||
|
||||
You can integrate Prometheus with Grafana Mimir using this chart and the [Bitnami Grafana Mimir chart](https://github.com/bitnami/charts/tree/main/bitnami/grafana-mimir) adding a `remoteWrite` entry:
|
||||
|
||||
- Create a **values.yaml** like the one below for Prometheus:
|
||||
|
||||
```yaml
|
||||
server:
|
||||
remoteWrite:
|
||||
- url: http://grafana-mimir-gateway.svc.cluster.local/api/v1/push
|
||||
headers:
|
||||
X-Scope-OrgID: demo
|
||||
```
|
||||
|
||||
- Install Prometheus and Grafana Mimir charts:
|
||||
|
||||
For Helm 3:
|
||||
|
||||
```console
|
||||
kubectl create namespace monitoring
|
||||
helm install prometheus \
|
||||
--values values.yaml \
|
||||
--namespace monitoring \
|
||||
oci://registry-1.docker.io/bitnamicharts/prometheus
|
||||
helm install grafana-mimir \
|
||||
oci://registry-1.docker.io/bitnamicharts/grafana-mimir
|
||||
```
|
||||
|
||||
That's all! Now you have Prometheus integrated with Grafana Mimir.
|
||||
|
||||
### Integrate Prometheus with Grafana
|
||||
|
||||
You can integrate Prometheus with Grafana Dashboard using this chart and the [Bitnami Grafana chart](https://github.com/bitnami/charts/tree/main/bitnami/grafana) just adding the prometheus datasources:
|
||||
|
||||
- Create a **values.yaml** like the one below for Grafana:
|
||||
|
||||
```yaml
|
||||
datasources:
|
||||
secretDefinition:
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
orgId: 1
|
||||
url: http://prometheus.monitoring.svc.cluster.local
|
||||
version: 1
|
||||
editable: true
|
||||
isDefault: true
|
||||
- name: Alertmanager
|
||||
uid: alertmanager
|
||||
type: alertmanager
|
||||
access: proxy
|
||||
orgId: 1
|
||||
url: http://prometheus-alertmanager.monitoring.svc.cluster.local:9093
|
||||
version: 1
|
||||
editable: true
|
||||
```
|
||||
|
||||
- Install Prometheus and Grafana charts:
|
||||
|
||||
For Helm 3:
|
||||
|
||||
```console
|
||||
kubectl create namespace monitoring
|
||||
helm install prometheus \
|
||||
--namespace monitoring \
|
||||
oci://registry-1.docker.io/bitnamicharts/prometheus
|
||||
helm install grafana \
|
||||
--values values.yaml \
|
||||
--namespace monitoring \
|
||||
oci://registry-1.docker.io/bitnamicharts/grafana
|
||||
```
|
||||
|
||||
### How to add new targets
|
||||
|
||||
By default this helm chart will monitor its own targets: prometheus and alertmanager. Additional ones can be added by setting a list with the [scrape_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) in the value `server.extraScrapeConfigs`. Here is a simple example for WordPress (deployed in the default namespace):
|
||||
|
||||
```yaml
|
||||
server:
|
||||
extraScrapeConfigs:
|
||||
- job_name: wordpress
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
metrics_path: /metrics
|
||||
relabel_configs:
|
||||
- source_labels:
|
||||
- job
|
||||
target_label: __tmp_wordpress_job_name
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_service_label_app_kubernetes_io_instance
|
||||
- __meta_kubernetes_service_labelpresent_app_kubernetes_io_instance
|
||||
regex: (wordpress);true
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_service_label_app_kubernetes_io_name
|
||||
- __meta_kubernetes_service_labelpresent_app_kubernetes_io_name
|
||||
regex: (wordpress);true
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_endpoint_port_name
|
||||
regex: metrics
|
||||
- source_labels:
|
||||
- __meta_kubernetes_endpoint_address_target_kind
|
||||
- __meta_kubernetes_endpoint_address_target_name
|
||||
separator: ;
|
||||
regex: Node;(.*)
|
||||
replacement: ${1}
|
||||
target_label: node
|
||||
- source_labels:
|
||||
- __meta_kubernetes_endpoint_address_target_kind
|
||||
- __meta_kubernetes_endpoint_address_target_name
|
||||
separator: ;
|
||||
regex: Pod;(.*)
|
||||
replacement: ${1}
|
||||
target_label: pod
|
||||
- source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
target_label: namespace
|
||||
- source_labels:
|
||||
- __meta_kubernetes_service_name
|
||||
target_label: service
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_name
|
||||
target_label: pod
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_container_name
|
||||
target_label: container
|
||||
- action: drop
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_phase
|
||||
regex: (Failed|Succeeded)
|
||||
- source_labels:
|
||||
- __meta_kubernetes_service_name
|
||||
target_label: job
|
||||
replacement: ${1}
|
||||
- target_label: endpoint
|
||||
replacement: metrics
|
||||
- source_labels:
|
||||
- __address__
|
||||
target_label: __tmp_hash
|
||||
modulus: 1
|
||||
action: hashmod
|
||||
- source_labels:
|
||||
- __tmp_hash
|
||||
regex: 0
|
||||
action: keep
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
|
||||
|
||||
## License
|
||||
|
||||
Copyright © 2023 Bitnami
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
<http://www.apache.org/licenses/LICENSE-2.0>
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
123
bitnami/prometheus/templates/NOTES.txt
Normal file
123
bitnami/prometheus/templates/NOTES.txt
Normal file
@@ -0,0 +1,123 @@
|
||||
CHART NAME: {{ .Chart.Name }}
|
||||
CHART VERSION: {{ .Chart.Version }}
|
||||
APP VERSION: {{ .Chart.AppVersion }}
|
||||
|
||||
** Please be patient while the chart is being deployed **
|
||||
|
||||
{{- if .Values.diagnosticMode.enabled }}
|
||||
The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
|
||||
|
||||
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
|
||||
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
|
||||
|
||||
Get the list of pods by executing:
|
||||
|
||||
kubectl get pods --namespace {{ include "common.names.namespace" . | quote }} -l app.kubernetes.io/instance={{ .Release.Name }}
|
||||
|
||||
Access the pod you want to debug by executing
|
||||
|
||||
kubectl exec --namespace {{ include "common.names.namespace" . | quote }} -ti <NAME OF THE POD> -- bash
|
||||
|
||||
In order to replicate the container startup scripts execute this command:
|
||||
|
||||
/opt/bitnami/prometheus/bin/prometheus --config.file=/opt/bitnami/prometheus/conf/prometheus.yml --storage.tsdb.path=/opt/bitnami/prometheus/data --web.console.libraries=/opt/bitnami/prometheus/conf/console_libraries --web.console.templates=/opt/bitnami/prometheus/conf/consoles
|
||||
|
||||
{{- else }}
|
||||
|
||||
Prometheus can be accessed via port "{{ .Values.server.service.ports.http }}" on the following DNS name from within your cluster:
|
||||
|
||||
{{ template "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local
|
||||
|
||||
To access Prometheus from outside the cluster execute the following commands:
|
||||
|
||||
{{- if .Values.server.ingress.enabled }}
|
||||
|
||||
You should be able to access your new Prometheus installation through
|
||||
|
||||
{{ ternary "https" "http" .Values.server.ingress.tls }}://{{ .Values.server.ingress.hostname }}
|
||||
|
||||
{{- else if contains "LoadBalancer" .Values.server.service.type }}
|
||||
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "common.names.fullname" . }}'
|
||||
|
||||
{{- $port:=.Values.server.service.ports.http | toString }}
|
||||
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
|
||||
echo "Prometheus URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.server.service.ports.http }}{{ end }}/"
|
||||
|
||||
{{- else if contains "ClusterIP" .Values.server.service.type }}
|
||||
|
||||
echo "Prometheus URL: http://127.0.0.1:9090/"
|
||||
kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "common.names.fullname" . }} 9090:{{ .Values.server.service.ports.http }}
|
||||
|
||||
{{- else if contains "NodePort" .Values.server.service.type }}
|
||||
|
||||
export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo "Prometheus URL: http://$NODE_IP:$NODE_PORT/"
|
||||
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.server.thanos.create }}

Thanos Sidecar can be accessed via port "{{ .Values.server.thanos.service.ports.grpc }}" on the following DNS name from within your cluster:

{{ template "prometheus.thanos-sidecar.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local

{{- if .Values.server.thanos.ingress.enabled }}

You should be able to access your new Thanos Sidecar installation through

{{ ternary "https" "http" .Values.server.thanos.ingress.tls }}://{{ .Values.server.thanos.ingress.hostname }}

{{- end }}
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.alertmanager.enabled }}

Watch the Alertmanager StatefulSet status using the command:

kubectl get sts -w --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=alertmanager

Alertmanager can be accessed via port "{{ .Values.alertmanager.service.ports.http }}" on the following DNS name from within your cluster:

{{ template "prometheus.alertmanager.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local

To access Alertmanager from outside the cluster execute the following commands:

{{- if .Values.alertmanager.ingress.enabled }}

You should be able to access your new Alertmanager installation through

{{ ternary "https" "http" .Values.alertmanager.ingress.tls }}://{{ .Values.alertmanager.ingress.hostname }}

{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}

NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "prometheus.alertmanager.fullname" . }}'

{{- $port := .Values.alertmanager.service.ports.http | toString }}

export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "prometheus.alertmanager.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
echo "Alertmanager URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.alertmanager.service.ports.http }}{{ end }}/"

{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}

echo "Alertmanager URL: http://127.0.0.1:9093/"
kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "prometheus.alertmanager.fullname" . }} 9093:{{ .Values.alertmanager.service.ports.http }}

{{- else if contains "NodePort" .Values.alertmanager.service.type }}

export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo "Alertmanager URL: http://$NODE_IP:$NODE_PORT/"

{{- end }}
{{- end }}
|
||||
|
||||
{{- include "common.warnings.rollingTag" .Values.server.image }}
|
||||
{{- include "common.warnings.rollingTag" .Values.server.thanos.image }}
|
||||
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
|
||||
{{- include "prometheus.server.validateValues" . }}
|
||||
151
bitnami/prometheus/templates/_helpers.tpl
Normal file
151
bitnami/prometheus/templates/_helpers.tpl
Normal file
@@ -0,0 +1,151 @@
|
||||
{{/*
Return the proper Prometheus server image name (registry/repository:tag),
honoring global image overrides via the "common.images.image" helper.
*/}}
{{- define "prometheus.server.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.server.image "global" .Values.global) }}
{{- end -}}
|
||||
|
||||
{{/*
Return the proper Alertmanager image name (registry/repository:tag),
honoring global image overrides via the "common.images.image" helper.
*/}}
{{- define "prometheus.alertmanager.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.alertmanager.image "global" .Values.global) }}
{{- end -}}
|
||||
|
||||
{{/*
Return the proper Thanos sidecar image name (registry/repository:tag),
honoring global image overrides via the "common.images.image" helper.
*/}}
{{- define "prometheus.server.thanosImage" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.server.thanos.image "global" .Values.global) }}
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return Prometheus server name
|
||||
*/}}
|
||||
{{- define "prometheus.server.fullname" -}}
|
||||
{{- printf "%s-server" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
Return the Prometheus server fully-qualified name including the release namespace
(built from "common.names.fullname.namespace"), truncated to the 63-character
Kubernetes name limit.
*/}}
{{- define "prometheus.server.fullname.namespace" -}}
{{- printf "%s-server" (include "common.names.fullname.namespace" .) | trunc 63 | trimSuffix "-" }}
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the init container volume-permissions image)
|
||||
*/}}
|
||||
{{- define "prometheus.volumePermissions.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper Docker Image Registry Secret Names
|
||||
*/}}
|
||||
{{- define "prometheus.imagePullSecrets" -}}
|
||||
{{- include "common.images.pullSecrets" (dict "images" (list .Values.server.image .Values.volumePermissions.image .Values.server.thanos.image .Values.alertmanager.image) "global" .Values.global) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "prometheus.server.serviceAccountName" -}}
|
||||
{{- if .Values.server.serviceAccount.create -}}
|
||||
{{ default (include "prometheus.server.fullname" .) .Values.server.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.server.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Compile all warnings into a single message.
|
||||
*/}}
|
||||
{{- define "prometheus.server.validateValues" -}}
|
||||
{{- $messages := list -}}
|
||||
{{- $messages := append $messages (include "prometheus.server.validateValues.thanosObjectStorageConfig" .) -}}
|
||||
{{- $messages := without $messages "" -}}
|
||||
{{- $message := join "\n" $messages -}}
|
||||
|
||||
{{- if $message -}}
|
||||
{{- printf "\nVALUES VALIDATION:\n%s" $message -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
Validate thanos objectStorageConfig.
When server.thanos.objectStorageConfig is set, both "secretName" and "secretKey"
must be provided; otherwise emit a validation error message.
*/}}
{{- define "prometheus.server.validateValues.thanosObjectStorageConfig" -}}
{{- if (and .Values.server.thanos.objectStorageConfig (or (not (hasKey .Values.server.thanos.objectStorageConfig "secretKey")) (not (hasKey .Values.server.thanos.objectStorageConfig "secretName")) ))}}
{{- printf "'server.thanos.objectStorageConfig.secretKey' and 'server.thanos.objectStorageConfig.secretName' are mandatory" }}
{{- end }}
{{- end }}
|
||||
|
||||
{{/*
|
||||
Get the Prometheus configuration configmap.
|
||||
*/}}
|
||||
{{- define "prometheus.server.configmapName" -}}
|
||||
{{- if .Values.server.existingConfigmap -}}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.existingConfigmap "context" .) -}}
|
||||
{{- else }}
|
||||
{{- include "prometheus.server.fullname" . -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get the Prometheus configuration configmap key.
|
||||
*/}}
|
||||
{{- define "prometheus.server.configmapKey" -}}
|
||||
{{- if .Values.server.existingConfigmapKey -}}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.existingConfigmapKey "context" .) -}}
|
||||
{{- else }}
|
||||
{{- printf "prometheus.yaml" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get the Prometheus Alertmanager configuration configmap key.
|
||||
*/}}
|
||||
{{- define "prometheus.alertmanager.configmapKey" -}}
|
||||
{{- if .Values.alertmanager.existingConfigmapKey -}}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.existingConfigmapKey "context" .) -}}
|
||||
{{- else }}
|
||||
{{- printf "alertmanager.yaml" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use in alertmanager
|
||||
*/}}
|
||||
{{- define "prometheus.alertmanager.serviceAccountName" -}}
|
||||
{{- if .Values.alertmanager.serviceAccount.create -}}
|
||||
{{ default (include "prometheus.alertmanager.fullname" .) .Values.alertmanager.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.alertmanager.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
Return Thanos sidecar service/ingress name, truncated to the 63-character
Kubernetes name limit (consistent with the other *.fullname helpers).
*/}}
{{- define "prometheus.thanos-sidecar.fullname" -}}
{{- printf "%s-thanos" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return Alertmanager name
|
||||
*/}}
|
||||
{{- define "prometheus.alertmanager.fullname" -}}
|
||||
{{- printf "%s-alertmanager" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get the Alertmanager configuration configmap.
|
||||
*/}}
|
||||
{{- define "prometheus.alertmanager.configmapName" -}}
|
||||
{{- if .Values.alertmanager.existingConfigmap -}}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.existingConfigmap "context" .) -}}
|
||||
{{- else }}
|
||||
{{- include "prometheus.alertmanager.fullname" . -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
81
bitnami/prometheus/templates/_scrape_config.tpl
Normal file
81
bitnami/prometheus/templates/_scrape_config.tpl
Normal file
@@ -0,0 +1,81 @@
|
||||
{{/*
|
||||
Return the prometheus scrape configuration for kubernetes objects.
|
||||
Usage:
|
||||
{{ include "prometheus.scrape_config" (dict "component" "alertmanager" "context" $) }}
|
||||
*/}}
|
||||
{{- define "prometheus.scrape_config" -}}
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- {{ include "common.names.namespace" .context }}
|
||||
metrics_path: /metrics
|
||||
relabel_configs:
|
||||
- source_labels:
|
||||
- job
|
||||
target_label: __tmp_prometheus_job_name
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_service_label_app_kubernetes_io_component
|
||||
- __meta_kubernetes_service_labelpresent_app_kubernetes_io_component
|
||||
regex: ({{ .component }});true
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_service_label_app_kubernetes_io_instance
|
||||
- __meta_kubernetes_service_labelpresent_app_kubernetes_io_instance
|
||||
regex: ({{ .context.Release.Name }});true
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_service_label_app_kubernetes_io_name
|
||||
- __meta_kubernetes_service_labelpresent_app_kubernetes_io_name
|
||||
regex: (prometheus);true
|
||||
- action: keep
|
||||
source_labels:
|
||||
- __meta_kubernetes_endpoint_port_name
|
||||
regex: http
|
||||
- source_labels:
|
||||
- __meta_kubernetes_endpoint_address_target_kind
|
||||
- __meta_kubernetes_endpoint_address_target_name
|
||||
separator: ;
|
||||
regex: Node;(.*)
|
||||
replacement: ${1}
|
||||
target_label: node
|
||||
- source_labels:
|
||||
- __meta_kubernetes_endpoint_address_target_kind
|
||||
- __meta_kubernetes_endpoint_address_target_name
|
||||
separator: ;
|
||||
regex: Pod;(.*)
|
||||
replacement: ${1}
|
||||
target_label: pod
|
||||
- source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
target_label: namespace
|
||||
- source_labels:
|
||||
- __meta_kubernetes_service_name
|
||||
target_label: service
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_name
|
||||
target_label: pod
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_container_name
|
||||
target_label: container
|
||||
- action: drop
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_phase
|
||||
regex: (Failed|Succeeded)
|
||||
- source_labels:
|
||||
- __meta_kubernetes_service_name
|
||||
target_label: job
|
||||
replacement: ${1}
|
||||
- target_label: endpoint
|
||||
replacement: http
|
||||
- source_labels:
|
||||
- __address__
|
||||
target_label: __tmp_hash
|
||||
modulus: 1
|
||||
action: hashmod
|
||||
- source_labels:
|
||||
- __tmp_hash
|
||||
regex: 0
|
||||
action: keep
|
||||
{{- end -}}
|
||||
19
bitnami/prometheus/templates/alertmanager/configmap.yaml
Normal file
19
bitnami/prometheus/templates/alertmanager/configmap.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.existingConfigmap) }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "prometheus.alertmanager.fullname" . | quote }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
data:
|
||||
{{ include "prometheus.alertmanager.configmapKey" . }}:
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.configuration "context" $) | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
64
bitnami/prometheus/templates/alertmanager/ingress.yaml
Normal file
64
bitnami/prometheus/templates/alertmanager/ingress.yaml
Normal file
@@ -0,0 +1,64 @@
|
||||
{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
  name: {{ include "prometheus.alertmanager.fullname" . | quote }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    app.kubernetes.io/part-of: prometheus
    app.kubernetes.io/component: alertmanager
    {{- if .Values.commonLabels }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
    {{- end }}
  {{- if or .Values.alertmanager.ingress.annotations .Values.commonAnnotations }}
  annotations:
    {{- if .Values.alertmanager.ingress.annotations }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.ingress.annotations "context" $) | nindent 4 }}
    {{- end }}
    {{- if .Values.commonAnnotations }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  {{- if and .Values.alertmanager.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
  ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName | quote }}
  {{- end }}
  rules:
    {{- if .Values.alertmanager.ingress.hostname }}
    - host: {{ .Values.alertmanager.ingress.hostname }}
      http:
        paths:
          {{- if .Values.alertmanager.ingress.extraPaths }}
          {{- toYaml .Values.alertmanager.ingress.extraPaths | nindent 10 }}
          {{- end }}
          - path: {{ .Values.alertmanager.ingress.path }}
            {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
            pathType: {{ .Values.alertmanager.ingress.pathType }}
            {{- end }}
            {{- /* Route to the Alertmanager service, not the Prometheus server service
                   (consistent with the extraHosts backends below). */}}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.alertmanager.fullname" .) "servicePort" "http" "context" $) | nindent 14 }}
    {{- end }}
    {{- range .Values.alertmanager.ingress.extraHosts }}
    - host: {{ .name | quote }}
      http:
        paths:
          - path: {{ default "/" .path }}
            {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
            pathType: {{ default "ImplementationSpecific" .pathType }}
            {{- end }}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.alertmanager.fullname" $) "servicePort" "http" "context" $) | nindent 14 }}
    {{- end }}
    {{- if .Values.alertmanager.ingress.extraRules }}
    {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.ingress.extraRules "context" $) | nindent 4 }}
    {{- end }}
  {{- if or (and .Values.alertmanager.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.alertmanager.ingress.annotations )) .Values.alertmanager.ingress.selfSigned)) .Values.alertmanager.ingress.extraTls }}
  tls:
    {{- if and .Values.alertmanager.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.alertmanager.ingress.annotations )) .Values.alertmanager.ingress.selfSigned) }}
    - hosts:
        - {{ .Values.alertmanager.ingress.hostname | quote }}
      secretName: {{ printf "%s-tls" .Values.alertmanager.ingress.hostname }}
    {{- end }}
    {{- if .Values.alertmanager.ingress.extraTls }}
    {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.ingress.extraTls "context" $) | nindent 4 }}
    {{- end }}
  {{- end }}
{{- end }}
|
||||
28
bitnami/prometheus/templates/alertmanager/pdb.yaml
Normal file
28
bitnami/prometheus/templates/alertmanager/pdb.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
{{- $replicaCount := int .Values.alertmanager.replicaCount }}
|
||||
{{- if and .Values.alertmanager.enabled .Values.alertmanager.pdb.create (gt $replicaCount 1) }}
|
||||
apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ include "prometheus.alertmanager.fullname" . | quote }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.alertmanager.pdb.minAvailable }}
|
||||
minAvailable: {{ .Values.alertmanager.pdb.minAvailable }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.pdb.maxUnavailable }}
|
||||
maxUnavailable: {{ .Values.alertmanager.pdb.maxUnavailable }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- end }}
|
||||
@@ -0,0 +1,23 @@
|
||||
{{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "prometheus.alertmanager.serviceAccountName" . | quote }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.alertmanager.serviceAccount.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.serviceAccount.annotations }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.serviceAccount.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.alertmanager.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,40 @@
|
||||
{{- if and .Values.alertmanager.enabled (gt (int .Values.alertmanager.replicaCount) 1) }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ printf "%s-headless" (include "prometheus.alertmanager.fullname" .) }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.alertmanager.service.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.alertmanager.service.annotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.service.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: ClusterIP
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: tcp-cluster
|
||||
port: {{ .Values.alertmanager.service.ports.cluster }}
|
||||
protocol: TCP
|
||||
targetPort: tcp-cluster
|
||||
- name: udp-cluster
|
||||
port: {{ .Values.alertmanager.service.ports.cluster }}
|
||||
protocol: UDP
|
||||
targetPort: udp-cluster
|
||||
{{- if .Values.alertmanager.service.extraPorts }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.service.extraPorts "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- end }}
|
||||
58
bitnami/prometheus/templates/alertmanager/service.yaml
Normal file
58
bitnami/prometheus/templates/alertmanager/service.yaml
Normal file
@@ -0,0 +1,58 @@
|
||||
{{- if .Values.alertmanager.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "prometheus.alertmanager.fullname" . | quote }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.alertmanager.service.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.alertmanager.service.annotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.service.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.alertmanager.service.type }}
|
||||
{{- if and .Values.alertmanager.service.clusterIP (eq .Values.alertmanager.service.type "ClusterIP") }}
|
||||
clusterIP: {{ .Values.alertmanager.service.clusterIP }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.service.sessionAffinity }}
|
||||
sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.service.sessionAffinityConfig }}
|
||||
sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.service.sessionAffinityConfig "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or (eq .Values.alertmanager.service.type "LoadBalancer") (eq .Values.alertmanager.service.type "NodePort") }}
|
||||
externalTrafficPolicy: {{ .Values.alertmanager.service.externalTrafficPolicy | quote }}
|
||||
{{- end }}
|
||||
{{- if and (eq .Values.alertmanager.service.type "LoadBalancer") (not (empty .Values.alertmanager.service.loadBalancerSourceRanges)) }}
|
||||
loadBalancerSourceRanges: {{ .Values.alertmanager.service.loadBalancerSourceRanges }}
|
||||
{{- end }}
|
||||
{{- if and (eq .Values.alertmanager.service.type "LoadBalancer") (not (empty .Values.alertmanager.service.loadBalancerIP)) }}
|
||||
loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
port: {{ .Values.alertmanager.service.ports.http }}
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
{{- if and (or (eq .Values.alertmanager.service.type "NodePort") (eq .Values.alertmanager.service.type "LoadBalancer")) (not (empty .Values.alertmanager.service.nodePorts.http)) }}
|
||||
nodePort: {{ .Values.alertmanager.service.nodePorts.http }}
|
||||
{{- else if eq .Values.alertmanager.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.service.extraPorts }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.service.extraPorts "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- end }}
|
||||
245
bitnami/prometheus/templates/alertmanager/statefulset.yaml
Normal file
245
bitnami/prometheus/templates/alertmanager/statefulset.yaml
Normal file
@@ -0,0 +1,245 @@
|
||||
{{- if .Values.alertmanager.enabled }}
|
||||
{{- $clusterPort := .Values.alertmanager.containerPorts.cluster }}
|
||||
apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ include "prometheus.alertmanager.fullname" . | quote }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.alertmanager.replicaCount }}
|
||||
podManagementPolicy: {{ .Values.alertmanager.podManagementPolicy | quote }}
|
||||
selector:
|
||||
matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
serviceName: {{ printf "%s-headless" (include "prometheus.alertmanager.fullname" .) }}
|
||||
{{- if .Values.alertmanager.updateStrategy }}
|
||||
updateStrategy: {{- toYaml .Values.alertmanager.updateStrategy | nindent 4 }}
|
||||
{{- end }}
|
||||
template:
|
||||
metadata:
|
||||
{{- if .Values.alertmanager.podAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.podAnnotations "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 8 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.alertmanager.podLabels }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.podLabels "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
serviceAccountName: {{ template "prometheus.alertmanager.serviceAccountName" . }}
|
||||
{{- include "prometheus.imagePullSecrets" . | nindent 6 }}
|
||||
{{- if .Values.alertmanager.hostAliases }}
|
||||
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.hostAliases "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.affinity }}
|
||||
affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.affinity "context" $) | nindent 8 }}
|
||||
{{- else }}
|
||||
affinity:
|
||||
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.alertmanager.podAffinityPreset "component" "alertmanager" "context" $) | nindent 10 }}
|
||||
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.alertmanager.podAntiAffinityPreset "component" "alertmanager" "context" $) | nindent 10 }}
|
||||
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.alertmanager.nodeAffinityPreset.type "key" .Values.alertmanager.nodeAffinityPreset.key "values" .Values.alertmanager.nodeAffinityPreset.values) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.nodeSelector }}
|
||||
nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.nodeSelector "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.tolerations }}
|
||||
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.tolerations "context" .) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.priorityClassName }}
|
||||
priorityClassName: {{ .Values.alertmanager.priorityClassName | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.schedulerName }}
|
||||
schedulerName: {{ .Values.alertmanager.schedulerName | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.topologySpreadConstraints }}
|
||||
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.topologySpreadConstraints "context" .) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.podSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.alertmanager.podSecurityContext "enabled" | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.alertmanager.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
{{- if and .Values.volumePermissions.enabled .Values.alertmanager.persistence.enabled }}
|
||||
- name: volume-permissions
|
||||
image: {{ include "prometheus.volumePermissions.image" . }}
|
||||
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
|
||||
command:
|
||||
- sh
|
||||
- -ec
|
||||
- |
|
||||
mkdir -p {{ .Values.alertmanager.persistence.mountPath }}
|
||||
find {{ .Values.alertmanager.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.alertmanager.containerSecurityContext.runAsUser }}:{{ .Values.alertmanager.podSecurityContext.fsGroup }}
|
||||
{{- if .Values.alertmanager.containerSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.alertmanager.containerSecurityContext "enabled" | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.volumePermissions.resources }}
|
||||
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: {{ .Values.alertmanager.persistence.mountPath }}
|
||||
{{- if .Values.alertmanager.persistence.subPath }}
|
||||
subPath: {{ .Values.alertmanager.persistence.subPath }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.initContainers }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.initContainers "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: alertmanager
|
||||
image: {{ template "prometheus.alertmanager.image" . }}
|
||||
imagePullPolicy: {{ .Values.alertmanager.image.pullPolicy }}
|
||||
{{- if .Values.alertmanager.containerSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.alertmanager.containerSecurityContext "enabled" | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.diagnosticMode.enabled }}
|
||||
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
|
||||
{{- else if .Values.alertmanager.command }}
|
||||
command: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.command "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.diagnosticMode.enabled }}
|
||||
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
|
||||
{{- else if .Values.alertmanager.args }}
|
||||
args: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.args "context" $) | nindent 12 }}
|
||||
{{- else }}
|
||||
args:
|
||||
- "--config.file=/opt/bitnami/alertmanager/conf/{{ include "prometheus.alertmanager.configmapKey" . }}"
|
||||
- "--storage.path=/opt/bitnami/alertmanager/data"
|
||||
- "--web.listen-address=0.0.0.0:{{ .Values.alertmanager.containerPorts.http }}"
|
||||
{{- if gt (int .Values.alertmanager.replicaCount) 1 }}
|
||||
- "--cluster.advertise-address=[$(POD_IP)]:{{ $clusterPort }}"
|
||||
- "--cluster.listen-address=0.0.0.0:{{ $clusterPort }}"
|
||||
{{- $fullName := include "prometheus.alertmanager.fullname" . }}
|
||||
{{- range $i := until (int .Values.alertmanager.replicaCount) }}
|
||||
- "--cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:{{ $clusterPort }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.extraArgs }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraArgs "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: status.podIP
|
||||
{{- if .Values.alertmanager.extraEnvVars }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraEnvVars "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
envFrom:
|
||||
{{- if .Values.alertmanager.extraEnvVarsCM }}
|
||||
- configMapRef:
|
||||
name: {{ include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraEnvVarsCM "context" $) }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.extraEnvVarsSecret }}
|
||||
- secretRef:
|
||||
name: {{ include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraEnvVarsSecret "context" $) }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.resources }}
|
||||
resources: {{- toYaml .Values.alertmanager.resources | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.alertmanager.containerPorts.http }}
|
||||
- name: tcp-cluster
|
||||
containerPort: {{ $clusterPort }}
|
||||
protocol: TCP
|
||||
- name: udp-cluster
|
||||
containerPort: {{ $clusterPort }}
|
||||
protocol: UDP
|
||||
{{- if not .Values.diagnosticMode.enabled }}
|
||||
{{- if .Values.alertmanager.customLivenessProbe }}
|
||||
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.customLivenessProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.alertmanager.livenessProbe.enabled }}
|
||||
livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.alertmanager.livenessProbe "enabled") "context" $) | nindent 12 }}
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: http
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.customReadinessProbe }}
|
||||
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.customReadinessProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.alertmanager.readinessProbe.enabled }}
|
||||
readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.alertmanager.readinessProbe "enabled") "context" $) | nindent 12 }}
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: http
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.customStartupProbe }}
|
||||
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.customStartupProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.alertmanager.startupProbe.enabled }}
|
||||
startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.alertmanager.startupProbe "enabled") "context" $) | nindent 12 }}
|
||||
tcpSocket:
|
||||
port: http
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.lifecycleHooks }}
|
||||
lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.lifecycleHooks "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /opt/bitnami/alertmanager/conf
|
||||
readOnly: true
|
||||
- name: data
|
||||
mountPath: {{ .Values.alertmanager.persistence.mountPath }}
|
||||
{{- if .Values.alertmanager.extraVolumeMounts }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraVolumeMounts "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.sidecars }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.alertmanager.sidecars "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ include "prometheus.alertmanager.configmapName" . }}
|
||||
{{- if not .Values.alertmanager.persistence.enabled }}
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
{{- end}}
|
||||
{{- if .Values.alertmanager.extraVolumes }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.extraVolumes "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.persistence.enabled }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
annotations:
|
||||
{{- if .Values.alertmanager.persistence.annotations }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.persistence.annotations "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.alertmanager.persistence.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.alertmanager.persistence.size | quote }}
|
||||
{{- if .Values.alertmanager.persistence.selector }}
|
||||
selector: {{- include "common.tplvalues.render" (dict "value" .Values.alertmanager.persistence.selector "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- include "common.storage.class" (dict "persistence" .Values.alertmanager.persistence "global" .Values.global) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
4
bitnami/prometheus/templates/extra-list.yaml
Normal file
4
bitnami/prometheus/templates/extra-list.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
{{- range .Values.extraDeploy }}
|
||||
---
|
||||
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
|
||||
{{- end }}
|
||||
49
bitnami/prometheus/templates/server/clusterrole.yaml
Normal file
49
bitnami/prometheus/templates/server/clusterrole.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
{{- if .Values.server.rbac.create }}
|
||||
kind: ClusterRole
|
||||
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
|
||||
metadata:
|
||||
name: {{ include "prometheus.server.fullname.namespace" . }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
# These rules come from <https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus/templates/clusterrole.yaml>
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
- nodes/proxy
|
||||
- nodes/metrics
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- ingresses
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
- "networking.k8s.io"
|
||||
resources:
|
||||
- ingresses/status
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- nonResourceURLs:
|
||||
- "/metrics"
|
||||
verbs:
|
||||
- get
|
||||
{{- if .Values.server.rbac.rules }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.server.rbac.rules "context" $ ) | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
21
bitnami/prometheus/templates/server/clusterrolebinding.yaml
Normal file
21
bitnami/prometheus/templates/server/clusterrolebinding.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
|
||||
metadata:
|
||||
name: {{ template "prometheus.server.fullname.namespace" . }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "prometheus.server.fullname.namespace" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "prometheus.server.serviceAccountName" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
21
bitnami/prometheus/templates/server/configmap.yaml
Normal file
21
bitnami/prometheus/templates/server/configmap.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
{{- if not .Values.existingConfigmap }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "prometheus.server.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
data:
|
||||
{{ include "prometheus.server.configmapKey" . }}:
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.configuration "context" $) | toYaml | nindent 4 }}
|
||||
rules.yaml:
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.alertingRules "context" $) | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
277
bitnami/prometheus/templates/server/deployment.yaml
Normal file
277
bitnami/prometheus/templates/server/deployment.yaml
Normal file
@@ -0,0 +1,277 @@
|
||||
apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "prometheus.server.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.server.replicaCount }}
|
||||
{{- if .Values.server.updateStrategy }}
|
||||
strategy: {{- toYaml .Values.server.updateStrategy | nindent 4 }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
checksum/configmap: {{ include (print $.Template.BasePath "/server/configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.server.podAnnotations }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.podAnnotations "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 8 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.server.podLabels }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.podLabels "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
serviceAccountName: {{ template "prometheus.server.serviceAccountName" . }}
|
||||
{{- include "prometheus.imagePullSecrets" . | nindent 6 }}
|
||||
{{- if .Values.server.hostAliases }}
|
||||
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.server.hostAliases "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.affinity }}
|
||||
affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.server.affinity "context" $) | nindent 8 }}
|
||||
{{- else }}
|
||||
affinity:
|
||||
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.server.podAffinityPreset "component" "server" "context" $) | nindent 10 }}
|
||||
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.server.podAntiAffinityPreset "component" "server" "context" $) | nindent 10 }}
|
||||
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.server.nodeAffinityPreset.type "key" .Values.server.nodeAffinityPreset.key "values" .Values.server.nodeAffinityPreset.values) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.nodeSelector }}
|
||||
nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.server.nodeSelector "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.tolerations }}
|
||||
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.server.tolerations "context" .) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.priorityClassName }}
|
||||
priorityClassName: {{ .Values.server.priorityClassName | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.schedulerName }}
|
||||
schedulerName: {{ .Values.server.schedulerName | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.topologySpreadConstraints }}
|
||||
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.server.topologySpreadConstraints "context" .) | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.podSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.server.podSecurityContext "enabled" | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
{{- if and .Values.volumePermissions.enabled .Values.server.persistence.enabled }}
|
||||
- name: volume-permissions
|
||||
image: {{ include "prometheus.volumePermissions.image" . }}
|
||||
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
|
||||
command:
|
||||
- sh
|
||||
- -ec
|
||||
- |
|
||||
mkdir -p {{ .Values.server.persistence.mountPath }}
|
||||
find {{ .Values.server.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.server.containerSecurityContext.runAsUser }}:{{ .Values.server.podSecurityContext.fsGroup }}
|
||||
securityContext: {{- include "common.tplvalues.render" (dict "value" .Values.volumePermissions.containerSecurityContext "context" $) | nindent 12 }}
|
||||
{{- if .Values.volumePermissions.resources }}
|
||||
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: {{ .Values.server.persistence.mountPath }}
|
||||
{{- if .Values.server.persistence.subPath }}
|
||||
subPath: {{ .Values.server.persistence.subPath }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.initContainers }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.initContainers "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: {{ template "prometheus.server.image" . }}
|
||||
imagePullPolicy: {{ .Values.server.image.pullPolicy }}
|
||||
{{- if .Values.server.containerSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.server.containerSecurityContext "enabled" | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.diagnosticMode.enabled }}
|
||||
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
|
||||
{{- else if .Values.server.command }}
|
||||
command: {{- include "common.tplvalues.render" (dict "value" .Values.server.command "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.diagnosticMode.enabled }}
|
||||
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
|
||||
{{- else if .Values.server.args }}
|
||||
args: {{- include "common.tplvalues.render" (dict "value" .Values.server.args "context" $) | nindent 12 }}
|
||||
{{- else }}
|
||||
args:
|
||||
- "--config.file=/opt/bitnami/prometheus/conf/{{ include "prometheus.server.configmapKey" . }}"
|
||||
- "--storage.tsdb.path={{ .Values.server.persistence.mountPath }}"
|
||||
- "--storage.tsdb.retention.time={{ .Values.server.retention }}"
|
||||
- "--storage.tsdb.retention.size={{ .Values.server.retentionSize }}"
|
||||
- "--log.level={{ .Values.server.logLevel }}"
|
||||
- "--log.format={{ .Values.server.logFormat }}"
|
||||
- "--web.listen-address=:{{ .Values.server.containerPorts.http }}"
|
||||
- "--web.console.libraries=/opt/bitnami/prometheus/conf/console_libraries"
|
||||
- "--web.console.templates=/opt/bitnami/prometheus/conf/consoles"
|
||||
{{- if .Values.server.enableAdminAPI}}
|
||||
- "--web.enable-admin-api"
|
||||
{{- end }}
|
||||
{{- if .Values.server.enableRemoteWriteReceiver }}
|
||||
- "--web.enable-remote-write-receiver"
|
||||
{{- end }}
|
||||
{{- if .Values.server.routePrefix }}
|
||||
- "--web.route-prefix={{ .Values.server.routePrefix }}"
|
||||
{{- end }}
|
||||
{{- if .Values.server.enableFeatures }}
|
||||
- "--enable-feature={{ join "," .Values.server.enableFeatures }}"
|
||||
{{- end }}
|
||||
{{- if .Values.server.extraArgs }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.extraArgs "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- if .Values.server.extraEnvVars }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.extraEnvVars "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
envFrom:
|
||||
{{- if .Values.server.extraEnvVarsCM }}
|
||||
- configMapRef:
|
||||
name: {{ include "common.tplvalues.render" (dict "value" .Values.server.extraEnvVarsCM "context" $) }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.extraEnvVarsSecret }}
|
||||
- secretRef:
|
||||
name: {{ include "common.tplvalues.render" (dict "value" .Values.server.extraEnvVarsSecret "context" $) }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.resources }}
|
||||
resources: {{- toYaml .Values.server.resources | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.server.containerPorts.http }}
|
||||
{{- if not .Values.diagnosticMode.enabled }}
|
||||
{{- if .Values.server.customLivenessProbe }}
|
||||
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.customLivenessProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.server.livenessProbe.enabled }}
|
||||
livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.livenessProbe "enabled") "context" $) | nindent 12 }}
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: http
|
||||
{{- end }}
|
||||
{{- if .Values.server.customReadinessProbe }}
|
||||
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.customReadinessProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.server.readinessProbe.enabled }}
|
||||
readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.readinessProbe "enabled") "context" $) | nindent 12 }}
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: http
|
||||
{{- end }}
|
||||
{{- if .Values.server.customStartupProbe }}
|
||||
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.customStartupProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.server.startupProbe.enabled }}
|
||||
startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.startupProbe "enabled") "context" $) | nindent 12 }}
|
||||
tcpSocket:
|
||||
port: http
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.lifecycleHooks }}
|
||||
lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.server.lifecycleHooks "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /opt/bitnami/prometheus/conf
|
||||
readOnly: true
|
||||
- name: data
|
||||
mountPath: {{ .Values.server.persistence.mountPath }}
|
||||
{{- if .Values.server.extraVolumeMounts }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.extraVolumeMounts "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.create }}
|
||||
- name: thanos-sidecar
|
||||
image: {{ template "prometheus.server.thanosImage" . }}
|
||||
imagePullPolicy: {{ .Values.server.thanos.image.pullPolicy }}
|
||||
args:
|
||||
- sidecar
|
||||
- --prometheus.url={{ default "http://localhost:9090" .Values.server.thanos.prometheusUrl }}
|
||||
- --grpc-address=0.0.0.0:10901
|
||||
- --http-address=0.0.0.0:10902
|
||||
- --tsdb.path=/prometheus/
|
||||
{{- if .Values.server.thanos.objectStorageConfig.secretName }}
|
||||
- --objstore.config=$(OBJSTORE_CONFIG)
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.extraArgs }}
|
||||
{{ toYaml .Values.server.thanos.extraArgs | indent 12 | trim }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.objectStorageConfig.secretName }}
|
||||
env:
|
||||
- name: OBJSTORE_CONFIG
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.server.thanos.objectStorageConfig.secretName }}
|
||||
key: {{ .Values.server.thanos.objectStorageConfig.secretKey | default "thanos.yaml" }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.resources }}
|
||||
resources: {{- toYaml .Values.server.thanos.resources | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: grpc
|
||||
containerPort: 10901
|
||||
protocol: TCP
|
||||
- name: http
|
||||
containerPort: 10902
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: /prometheus
|
||||
name: data
|
||||
{{- if .Values.server.thanos.extraVolumeMounts }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.extraVolumeMounts "context" $) | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.containerSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.server.thanos.containerSecurityContext "enabled" | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.customLivenessProbe }}
|
||||
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.customLivenessProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.server.thanos.livenessProbe.enabled }}
|
||||
livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.thanos.livenessProbe "enabled") "context" $) | nindent 12 }}
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: http
|
||||
scheme: HTTP
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.customReadinessProbe }}
|
||||
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.customReadinessProbe "context" $) | nindent 12 }}
|
||||
{{- else if .Values.server.thanos.livenessProbe.enabled }}
|
||||
readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.server.thanos.readinessProbe "enabled") "context" $) | nindent 12 }}
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: http
|
||||
scheme: HTTP
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.sidecars }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.server.sidecars "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ include "prometheus.server.configmapName" . }}
|
||||
- name: data
|
||||
{{- if .Values.server.persistence.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ default (include "prometheus.server.fullname" .) .Values.server.persistence.existingClaim }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.server.extraVolumes }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.extraVolumes "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
64
bitnami/prometheus/templates/server/ingress.yaml
Normal file
64
bitnami/prometheus/templates/server/ingress.yaml
Normal file
@@ -0,0 +1,64 @@
|
||||
{{- if .Values.server.ingress.enabled }}
|
||||
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ template "prometheus.server.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.server.ingress.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.server.ingress.annotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.server.ingress.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and .Values.server.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
|
||||
ingressClassName: {{ .Values.server.ingress.ingressClassName | quote }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- if .Values.server.ingress.hostname }}
|
||||
- host: {{ .Values.server.ingress.hostname }}
|
||||
http:
|
||||
paths:
|
||||
{{- if .Values.server.ingress.extraPaths }}
|
||||
{{- toYaml .Values.server.ingress.extraPaths | nindent 10 }}
|
||||
{{- end }}
|
||||
- path: {{ .Values.server.ingress.path }}
|
||||
{{- if eq "true" (include "common.ingress.supportsPathType" .) }}
|
||||
pathType: {{ .Values.server.ingress.pathType }}
|
||||
{{- end }}
|
||||
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "http" "context" $) | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- range .Values.server.ingress.extraHosts }}
|
||||
- host: {{ .name | quote }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ default "/" .path }}
|
||||
{{- if eq "true" (include "common.ingress.supportsPathType" $) }}
|
||||
pathType: {{ default "ImplementationSpecific" .pathType }}
|
||||
{{- end }}
|
||||
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.ingress.extraRules }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.ingress.extraRules "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or (and .Values.server.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.ingress.annotations )) .Values.server.ingress.selfSigned)) .Values.server.ingress.extraTls }}
|
||||
tls:
|
||||
{{- if and .Values.server.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.ingress.annotations )) .Values.server.ingress.selfSigned) }}
|
||||
- hosts:
|
||||
- {{ .Values.server.ingress.hostname | quote }}
|
||||
secretName: {{ printf "%s-tls" .Values.server.ingress.hostname }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.ingress.extraTls }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.ingress.extraTls "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
28
bitnami/prometheus/templates/server/pdb.yaml
Normal file
28
bitnami/prometheus/templates/server/pdb.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
{{- $replicaCount := int .Values.replicaCount }}
|
||||
{{- if and .Values.server.pdb.create (gt $replicaCount 1) }}
|
||||
apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ include "prometheus.server.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.server.pdb.minAvailable }}
|
||||
minAvailable: {{ .Values.server.pdb.minAvailable }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.pdb.maxUnavailable }}
|
||||
maxUnavailable: {{ .Values.server.pdb.maxUnavailable }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- end }}
|
||||
37
bitnami/prometheus/templates/server/pvc.yaml
Normal file
37
bitnami/prometheus/templates/server/pvc.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
{{- if and .Values.server.persistence.enabled (not .Values.server.persistence.existingClaim) -}}
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ include "prometheus.server.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.server.persistence.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.server.persistence.annotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.server.persistence.annotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.server.persistence.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.server.persistence.size | quote }}
|
||||
{{- if .Values.server.persistence.selector }}
|
||||
selector: {{- include "common.tplvalues.render" (dict "value" .Values.server.persistence.selector "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.persistence.dataSource }}
|
||||
dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.server.persistence.dataSource "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- include "common.storage.class" (dict "persistence" .Values.server.persistence "global" .Values.global) | nindent 2 }}
|
||||
{{- end -}}
|
||||
23
bitnami/prometheus/templates/server/service-account.yaml
Normal file
23
bitnami/prometheus/templates/server/service-account.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
{{- if .Values.server.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "prometheus.server.serviceAccountName" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.server.serviceAccount.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.serviceAccount.annotations }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.serviceAccount.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.server.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
56
bitnami/prometheus/templates/server/service.yaml
Normal file
56
bitnami/prometheus/templates/server/service.yaml
Normal file
@@ -0,0 +1,56 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "prometheus.server.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.server.service.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.server.service.annotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.server.service.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.server.service.type }}
|
||||
{{- if and .Values.server.service.clusterIP (eq .Values.server.service.type "ClusterIP") }}
|
||||
clusterIP: {{ .Values.server.service.clusterIP }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.service.sessionAffinity }}
|
||||
sessionAffinity: {{ .Values.server.service.sessionAffinity }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.service.sessionAffinityConfig }}
|
||||
sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.server.service.sessionAffinityConfig "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or (eq .Values.server.service.type "LoadBalancer") (eq .Values.server.service.type "NodePort") }}
|
||||
externalTrafficPolicy: {{ .Values.server.service.externalTrafficPolicy | quote }}
|
||||
{{- end }}
|
||||
{{- if and (eq .Values.server.service.type "LoadBalancer") (not (empty .Values.server.service.loadBalancerSourceRanges)) }}
|
||||
loadBalancerSourceRanges: {{ .Values.server.service.loadBalancerSourceRanges }}
|
||||
{{- end }}
|
||||
{{- if and (eq .Values.server.service.type "LoadBalancer") (not (empty .Values.server.service.loadBalancerIP)) }}
|
||||
loadBalancerIP: {{ .Values.server.service.loadBalancerIP }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
port: {{ .Values.server.service.ports.http }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
{{- if and (or (eq .Values.server.service.type "NodePort") (eq .Values.server.service.type "LoadBalancer")) (not (empty .Values.server.service.nodePorts.http)) }}
|
||||
nodePort: {{ .Values.server.service.nodePorts.http }}
|
||||
{{- else if eq .Values.server.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
{{- if .Values.server.service.extraPorts }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.service.extraPorts "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
63
bitnami/prometheus/templates/server/thanos-ingress.yaml
Normal file
63
bitnami/prometheus/templates/server/thanos-ingress.yaml
Normal file
@@ -0,0 +1,63 @@
|
||||
{{- if and .Values.server.thanos.create .Values.server.thanos.ingress.enabled }}
|
||||
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ template "prometheus.thanos-sidecar.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
app.kubernetes.io/subcomponent: thanos
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
{{- if .Values.server.thanos.ingress.annotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.server.thanos.ingress.annotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and .Values.server.thanos.ingress.ingressClassName (include "common.ingress.supportsIngressClassname" .) }}
|
||||
ingressClassName: {{ .Values.server.thanos.ingress.ingressClassName | quote }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- if .Values.server.thanos.ingress.hostname }}
|
||||
- host: {{ .Values.server.thanos.ingress.hostname }}
|
||||
http:
|
||||
paths:
|
||||
{{- if .Values.server.thanos.ingress.extraPaths }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.ingress.extraPaths "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
- path: {{ .Values.server.thanos.ingress.path }}
|
||||
{{- if eq "true" (include "common.ingress.supportsPathType" .) }}
|
||||
pathType: {{ .Values.server.thanos.ingress.pathType }}
|
||||
{{- end }}
|
||||
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.thanos-sidecar.fullname" $) "servicePort" "grpc" "context" $) | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- range .Values.server.thanos.ingress.extraHosts }}
|
||||
- host: {{ .name | quote }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ default "/" .path }}
|
||||
{{- if eq "true" (include "common.ingress.supportsPathType" $) }}
|
||||
pathType: {{ default "ImplementationSpecific" .pathType }}
|
||||
{{- end }}
|
||||
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "prometheus.thanos-sidecar.fullname" $) "servicePort" "grpc" "context" $) | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.ingress.extraRules }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.ingress.extraRules "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or (and .Values.server.thanos.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.thanos.ingress.annotations )) .Values.server.thanos.ingress.selfSigned)) .Values.server.thanos.ingress.extraTls }}
|
||||
tls:
|
||||
{{- if and .Values.server.thanos.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.server.thanos.ingress.annotations )) .Values.server.thanos.ingress.selfSigned) }}
|
||||
- hosts:
|
||||
- {{ .Values.server.thanos.ingress.hostname | quote }}
|
||||
secretName: {{ printf "%s-tls" .Values.server.thanos.ingress.hostname }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.ingress.extraTls }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.ingress.extraTls "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
55
bitnami/prometheus/templates/server/thanos-service.yaml
Normal file
55
bitnami/prometheus/templates/server/thanos-service.yaml
Normal file
@@ -0,0 +1,55 @@
|
||||
{{- if .Values.server.thanos.create }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "prometheus.thanos-sidecar.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
app.kubernetes.io/subcomponent: thanos
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.service.annotations }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.service.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.server.thanos.service.type }}
|
||||
{{- if and (eq .Values.server.thanos.service.type "LoadBalancer") (not (empty .Values.server.thanos.service.loadBalancerIP)) }}
|
||||
loadBalancerIP: {{ .Values.server.thanos.service.loadBalancerIP }}
|
||||
{{- end }}
|
||||
{{- if and (eq .Values.server.thanos.service.type "LoadBalancer") (not (empty .Values.server.thanos.service.loadBalancerSourceRanges)) }}
|
||||
loadBalancerSourceRanges: {{ .Values.server.thanos.service.loadBalancerSourceRanges }}
|
||||
{{- end }}
|
||||
{{- if and .Values.server.thanos.service.clusterIP (eq .Values.server.thanos.service.type "ClusterIP") }}
|
||||
clusterIP: {{ .Values.server.thanos.service.clusterIP }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.service.sessionAffinity }}
|
||||
sessionAffinity: {{ .Values.server.thanos.service.sessionAffinity }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.service.sessionAffinityConfig }}
|
||||
sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.service.sessionAffinityConfig "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or (eq .Values.server.thanos.service.type "LoadBalancer") (eq .Values.server.thanos.service.type "NodePort") }}
|
||||
externalTrafficPolicy: {{ .Values.server.thanos.service.externalTrafficPolicy | quote }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: grpc
|
||||
port: {{ .Values.server.thanos.service.ports.grpc }}
|
||||
targetPort: grpc
|
||||
protocol: TCP
|
||||
{{- if and .Values.server.thanos.service.nodePorts.grpc (or (eq .Values.server.thanos.service.type "NodePort") (eq .Values.server.thanos.service.type "LoadBalancer")) }}
|
||||
nodePort: {{ .Values.server.thanos.service.nodePorts.grpc }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.service.extraPorts }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.server.thanos.service.extraPorts "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- end }}
|
||||
151
bitnami/prometheus/templates/tls-secret.yaml
Normal file
151
bitnami/prometheus/templates/tls-secret.yaml
Normal file
@@ -0,0 +1,151 @@
|
||||
{{- if or .Values.server.ingress.enabled
|
||||
(and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled)
|
||||
(and .Values.server.thanos.create .Values.server.thanos.ingress.enabled) }}
|
||||
{{- if .Values.server.ingress.secrets }}
|
||||
{{- range .Values.server.ingress.secrets }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .name }}
|
||||
namespace: {{ include "common.names.namespace" $ | quote }}
|
||||
labels: {{- include "common.labels.standard" $ | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if $.Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $.Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ .certificate | b64enc }}
|
||||
tls.key: {{ .key | b64enc }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.ingress.secrets }}
|
||||
{{- range .Values.alertmanager.ingress.secrets }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .name }}
|
||||
namespace: {{ include "common.names.namespace" $ | quote }}
|
||||
labels: {{- include "common.labels.standard" $ | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if $.Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $.Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ .certificate | b64enc }}
|
||||
tls.key: {{ .key | b64enc }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.server.thanos.ingress.secrets }}
|
||||
{{- range .Values.server.thanos.ingress.secrets }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .name }}
|
||||
namespace: {{ include "common.names.namespace" $ | quote }}
|
||||
labels: {{- include "common.labels.standard" $ | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
app.kubernetes.io/subcomponent: thanos
|
||||
{{- if $.Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $.Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ .certificate | b64enc }}
|
||||
tls.key: {{ .key | b64enc }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if (or (and .Values.server.ingress.tls .Values.server.ingress.selfSigned)
|
||||
(and .Values.alertmanager.ingress.tls .Values.alertmanager.ingress.selfSigned)
|
||||
(and .Values.server.thanos.ingress.tls .Values.server.thanos.ingress.selfSigned)) }}
|
||||
{{- $ca := genCA "prometheus-ca" 365 }}
|
||||
{{- if and .Values.server.ingress.tls .Values.server.ingress.selfSigned }}
|
||||
{{- $secretName := printf "%s-tls" .Values.server.ingress.hostname }}
|
||||
{{- $cert := genSignedCert .Values.server.ingress.hostname nil (list .Values.server.ingress.hostname) 365 $ca }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ $secretName }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
|
||||
tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
|
||||
ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- if and .Values.alertmanager.ingress.tls .Values.alertmanager.ingress.selfSigned }}
|
||||
{{- $secretName := printf "%s-tls" .Values.alertmanager.ingress.hostname }}
|
||||
{{- $cert := genSignedCert .Values.alertmanager.ingress.hostname nil (list .Values.alertmanager.ingress.hostname) 365 $ca }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ $secretName }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: alertmanager
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
|
||||
tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
|
||||
ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
|
||||
{{- end }}
|
||||
---
|
||||
{{- if and .Values.server.thanos.ingress.tls .Values.server.thanos.ingress.selfSigned }}
|
||||
{{- $secretName := printf "%s-tls" .Values.server.thanos.ingress.hostname }}
|
||||
{{- $cert := genSignedCert .Values.server.thanos.ingress.hostname nil (list .Values.server.thanos.ingress.hostname) 365 $ca }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ $secretName }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
app.kubernetes.io/part-of: prometheus
|
||||
app.kubernetes.io/component: server
|
||||
app.kubernetes.io/subcomponent: thanos
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
|
||||
tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
|
||||
ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
1954
bitnami/prometheus/values.schema.json
Normal file
1954
bitnami/prometheus/values.schema.json
Normal file
File diff suppressed because it is too large
Load Diff
1508
bitnami/prometheus/values.yaml
Normal file
1508
bitnami/prometheus/values.yaml
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user