Merge branch 'master' into apacheChartLint

This commit is contained in:
Juan Ariza Toledano
2019-11-14 12:14:27 +01:00
committed by GitHub
298 changed files with 13502 additions and 12069 deletions

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: airflow
version: 4.0.11
version: 4.0.15
appVersion: 1.10.5
description: Apache Airflow is a platform to programmatically author, schedule and monitor workflows.
keywords:

View File

@@ -1,9 +1,9 @@
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 6.5.7
version: 6.5.9
- name: redis
repository: https://charts.bitnami.com/bitnami
version: 9.5.1
version: 9.5.3
digest: sha256:99c086e6c1e8c381e164fb75d158887bf9929a2babce2f2dd6778b71e3a7820f
generated: 2019-11-06T09:27:52.249053514Z
generated: 2019-11-12T19:49:18.104630905Z

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/airflow
tag: 1.10.5-debian-9-r52
tag: 1.10.5-debian-9-r56
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -38,7 +38,7 @@ image:
schedulerImage:
registry: docker.io
repository: bitnami/airflow-scheduler
tag: 1.10.5-debian-9-r57
tag: 1.10.5-debian-9-r65
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -62,7 +62,7 @@ schedulerImage:
workerImage:
registry: docker.io
repository: bitnami/airflow-worker
tag: 1.10.5-debian-9-r57
tag: 1.10.5-debian-9-r62
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -86,7 +86,7 @@ workerImage:
git:
registry: docker.io
repository: bitnami/git
tag: 2.24.0-debian-9-r0
tag: 2.24.0-debian-9-r7
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
@@ -346,7 +346,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/airflow-exporter
tag: 0.20180711.0-debian-9-r84
tag: 0.20180711.0-debian-9-r90
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/airflow
tag: 1.10.5-debian-9-r52
tag: 1.10.5-debian-9-r56
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -38,7 +38,7 @@ image:
schedulerImage:
registry: docker.io
repository: bitnami/airflow-scheduler
tag: 1.10.5-debian-9-r57
tag: 1.10.5-debian-9-r65
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -62,7 +62,7 @@ schedulerImage:
workerImage:
registry: docker.io
repository: bitnami/airflow-worker
tag: 1.10.5-debian-9-r57
tag: 1.10.5-debian-9-r62
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -86,7 +86,7 @@ workerImage:
git:
registry: docker.io
repository: bitnami/git
tag: 2.24.0-debian-9-r0
tag: 2.24.0-debian-9-r7
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
@@ -346,7 +346,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/airflow-exporter
tag: 0.20180711.0-debian-9-r84
tag: 0.20180711.0-debian-9-r90
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: apache
version: 7.2.5
version: 7.2.11
appVersion: 2.4.41
description: Chart for Apache HTTP Server
keywords:

View File

@@ -13,7 +13,7 @@
image:
registry: docker.io
repository: bitnami/apache
tag: 2.4.41-debian-9-r82
tag: 2.4.41-debian-9-r88
## Specify a imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
@@ -31,7 +31,7 @@ image:
git:
registry: docker.io
repository: bitnami/git
tag: 2.24.0-debian-9-r1
tag: 2.24.0-debian-9-r8
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
@@ -168,7 +168,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-9-r102
tag: 0.7.0-debian-9-r108
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: elasticsearch
version: 8.0.3
version: 8.2.4
appVersion: 7.4.2
description: A highly scalable open-source full-text search and analytics engine
keywords:

View File

@@ -52,7 +52,7 @@ $ helm delete --purge my-release
The following table lists the configurable parameters of the Elasticsearch chart and their default values.
| Parameter | Description | Default |
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
@@ -67,6 +67,8 @@ The following table lists the configurable parameters of the Elasticsearch chart
| `name` | Elasticsearch cluster name | `elastic` |
| `plugins` | Comma, semi-colon or space separated list of plugins to install at initialization | `nil` |
| `config` | Elasticsearch node custom configuration | `` |
| `extraVolumes` | Extra volumes | |
| `extraVolumeMounts` | Mount extra volume(s), | |
| `master.name` | Master-eligible node pod name | `master` |
| `master.replicas` | Desired number of Elasticsearch master-eligible nodes | `2` |
| `master.heapSize` | Master-eligible node heap size | `128m` |

View File

@@ -126,6 +126,9 @@ spec:
{{- end }}
- name: "data"
mountPath: "/bitnami/elasticsearch/data/"
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 8 }}
{{ end }}
volumes:
{{- if .Values.config }}
- name: "config"
@@ -134,3 +137,6 @@ spec:
{{- end }}
- name: "data"
emptyDir: {}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 6 }}
{{- end }}

View File

@@ -146,12 +146,18 @@ spec:
{{- end }}
- name: "data"
mountPath: "/bitnami/elasticsearch/data"
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 8 }}
{{ end }}
volumes:
{{- if .Values.config }}
- name: "config"
configMap:
name: {{ template "elasticsearch.fullname" . }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 6 }}
{{- end }}
{{- if .Values.data.persistence.enabled }}
volumeClaimTemplates:
- metadata:

View File

@@ -15,7 +15,6 @@ metadata:
annotations:
"helm.sh/hook": post-{{ $kind }}
"helm.sh/hook-weight": "1"
"helm.sh/hook-delete-policy": before-hook-creation
{{- if $.Values.cronjob.annotations }}
{{ toYaml $.Values.cronjob.annotations | indent 4 }}
{{- end }}

View File

@@ -128,6 +128,9 @@ spec:
{{- end }}
- name: "data"
mountPath: "/bitnami/elasticsearch/data/"
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 8 }}
{{ end }}
volumes:
{{- if .Values.config }}
- name: "config"
@@ -136,4 +139,7 @@ spec:
{{- end }}
- name: "data"
emptyDir: {}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 6 }}
{{- end }}
{{- end }}

View File

@@ -85,6 +85,8 @@ spec:
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int .Values.master.replicas }}
value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }}
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div .Values.master.replicas 2) 1 | quote }}
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
@@ -130,6 +132,9 @@ spec:
{{- end }}
- name: "data"
mountPath: "/bitnami/elasticsearch/data"
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 8 }}
{{ end }}
volumes:
{{- if .Values.config }}
- name: "config"
@@ -138,3 +143,6 @@ spec:
{{- end }}
- name: "data"
emptyDir: {}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 6 }}
{{- end }}

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/elasticsearch
tag: 7.4.2-debian-9-r1
tag: 7.4.2-debian-9-r9
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -86,6 +86,18 @@ name: elastic
##
# config:
# extraVolumes and extraVolumeMounts allows you to mount other volumes
# Example Use Case: mount ssl certificates when elasticsearch has tls enabled
# extraVolumes:
# - name: es-certs
# secret:
# defaultMode: 420
# secretName: es-certs
# extraVolumeMounts:
# - name: es-certs
# mountPath: /certs
# readOnly: true
## Elasticsearch master-eligible node parameters
##
master:
@@ -336,7 +348,7 @@ curator:
image:
registry: docker.io
repository: bitnami/elasticsearch-curator
tag: 5.8.1-debian-9-r9
tag: 5.8.1-debian-9-r16
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
@@ -494,7 +506,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/elasticsearch-exporter
tag: 1.1.0-debian-9-r72
tag: 1.1.0-debian-9-r78
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/elasticsearch
tag: 7.4.2-debian-9-r1
tag: 7.4.2-debian-9-r9
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -86,6 +86,18 @@ name: elastic
##
# config:
# extraVolumes and extraVolumeMounts allows you to mount other volumes
# Example Use Case: mount ssl certificates when elasticsearch has tls enabled
# extraVolumes:
# - name: es-certs
# secret:
# defaultMode: 420
# secretName: es-certs
# extraVolumeMounts:
# - name: es-certs
# mountPath: /certs
# readOnly: true
## Elasticsearch master-eligible node parameters
##
master:
@@ -336,7 +348,7 @@ curator:
image:
registry: docker.io
repository: bitnami/elasticsearch-curator
tag: 5.8.1-debian-9-r9
tag: 5.8.1-debian-9-r16
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
@@ -345,6 +357,7 @@ curator:
# pullSecrets:
# - myRegistryKeySecretName
cronjob:
# At 01:00 every day
schedule: "0 1 * * *"
@@ -494,7 +507,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/elasticsearch-exporter
tag: 1.1.0-debian-9-r72
tag: 1.1.0-debian-9-r78
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: etcd
version: 4.4.1
version: 4.4.2
appVersion: 3.4.3
description: etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines
keywords:

View File

@@ -163,8 +163,7 @@ data:
exit 1
fi
{{- else }}
echo "==> Disaster recovery is disabled, the cluster cannot be recovered!" 1>&3 2>&4
exit 1
echo "==> Disaster recovery is disabled, the cluster will try to recover on it's own..." 1>&3 2>&4
{{- end }}
elif should_add_new_member; then
echo "==> Adding new member to existing cluster..." 1>&3 2>&4

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: fluentd
version: 0.2.0
version: 0.4.0
appVersion: 1.7.4
description: Fluentd is an open source data collector for unified logging layer
keywords:

View File

@@ -48,8 +48,8 @@ The command removes all the Kubernetes components associated with the chart and
The following tables lists the configurable parameters of the kibana chart and their default values.
| Parameter | Description | Default |
|-------------------------------------------------|----------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|
| Parameter | Description | Default |
| ----------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `image.registry` | Fluentd image registry | `docker.io` |
@@ -60,10 +60,21 @@ The following tables lists the configurable parameters of the kibana chart and t
| `nameOverride` | String to partially override fluentd.fullname template with a string (will prepend the release name) | `nil` |
| `fullnameOverride` | String to fully override fluentd.fullname template with a string | `nil` |
| `clusterDomain` | Kubernetes DNS domain name to use | `cluster.local` |
| `forwarder.enabled` | Enable Fluentd aggregator | `true` |
| `forwarder.securityContext.enabled` | Enable security context for forwarder pods | `true` |
| `forwarder.securityContext.fsGroup` | Group ID for forwarder's containers filesystem | `0` |
| `forwarder.securityContext.runAsUser` | User ID for forwarder's containers | `0` |
| `forwarder.configFile` | Name of the config file that will be used by Fluentd at launch under the `/opt/bitnami/fluentd/conf` directory | `fluentd.conf` |
| `forwarder.configMap` | Name of the config map that contains the Fluentd configuration files | `nil` |
| `forwarder.extraArgs` | Extra arguments for the Fluentd command line | `nil` |
| `forwarder.extraEnv` | Extra environment variables to pass to the container | `{}` |
| `forwarder.containerPorts` | Ports the forwarder containers will listen on | `Check values.yaml` |
| `forwarder.service.type` | Kubernetes service type (`ClusterIP`, `NodePort`, or `LoadBalancer`) for the forwarders | `ClusterIP` |
| `forwarder.service.ports` | Array containing the forwarder service ports | `Check values.yaml file` |
| `forwarder.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
| `forwarder.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
| `forwarder.service.clusterIP` | Static clusterIP or None for headless services | `nil` |
| `forwarder.service.annotations` | Annotations for the forwarder service | `{}` |
| `forwarder.livenessProbe.enabled` | Enable liveness probes for the forwarder | `true` |
| `forwarder.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` |
| `forwarder.livenessProbe.periodSeconds` | How often to perform the probe | `10` |
@@ -84,11 +95,21 @@ The following tables lists the configurable parameters of the kibana chart and t
| `forwarder.podAnnotations` | Pod annotations | `{}` |
| `aggregator.enabled` | Enable Fluentd aggregator | `true` |
| `aggregator.replicaCount` | Number of aggregator pods to deploy in the Stateful Set | `2` |
| `aggregator.securityContext.enabled` | Enable security context for aggregator pods | `true` |
| `aggregator.securityContext.fsGroup` | Group ID for aggregator's containers filesystem | `1001` |
| `aggregator.securityContext.runAsUser` | User ID for aggregator's containers | `1001` |
| `aggregator.configFile` | Name of the config file that will be used by Fluentd at launch under the `/opt/bitnami/fluentd/conf` directory | `fluentd.conf` |
| `aggregator.configMap` | Name of the config map that contains the Fluentd configuration files | `nil` |
| `aggregator.port` | Kubernetes Service port - Fluentd transport port for the aggregators | `24224` |
| `aggregator.extraArgs` | Extra arguments for the Fluentd command line | `nil` |
| `aggregator.extraEnv` | Extra environment variables to pass to the container | `{}` |
| `aggregator.containerPorts` | Ports the aggregator containers will listen on | `Check values.yaml` |
| `aggregator.service.type` | Kubernetes service type (`ClusterIP`, `NodePort`, or `LoadBalancer`) for the aggregators | `ClusterIP` |
| `aggregator.service.ports` | Array containing the aggregator service ports | `Check values.yaml file` |
| `aggregator.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
| `aggregator.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
| `aggregator.service.clusterIP` | Static clusterIP or None for headless services | `nil` |
| `aggregator.service.annotations` | Annotations for the aggregator service | `{}` |
| `aggregator.livenessProbe.enabled` | Enable liveness probes for the aggregator | `true` |
| `aggregator.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` |
| `aggregator.livenessProbe.periodSeconds` | How often to perform the probe | `10` |
@@ -125,9 +146,6 @@ The following tables lists the configurable parameters of the kibana chart and t
| `tls.serverCertificate` | Server certificate | Server certificate content |
| `tls.serverKey` | Server Key | Server private key content |
| `tls.existingSecret` | Existing secret with certificate content | `nil` |
| `securityContext.enabled` | Enable security context | `true` |
| `securityContext.fsGroup` | Group ID for the container filesystem | `1001` |
| `securityContext.runAsUser` | User ID for the container | `1001` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

View File

@@ -3,15 +3,22 @@
To verify that Fluentd has started, run:
kubectl get all -l "app.kubernetes.io/name={{ include "fluentd.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"
{{ if and .Values.aggregator.enabled (not .Values.aggregator.configMap) }}
{{ if and .Values.aggregator.enabled .Values.forwarder.enabled (not .Values.aggregator.configMap) }}
Logs are captured on each node by the forwarder pods and then sent to the aggregator pods. By default, the aggregator pods send the logs to the standard output.
You can see all the logs by running this command:
kubectl logs -l "app.kubernetes.io/component=aggregator"
You can mount your own configuration files to the aggregators and the forwarders. For example, this is useful if you want to forward the aggregated logs to Elasticsearch or another service.
{{- else if and .Values.aggregator.enabled (not .Values.forwarder.enabled) }}
You have deployed Fluentd in aggregator-only mode. Logs received by the aggregator will be thrown to the standard output by default.
You can see all the logs by running this command:
kubectl logs -l "app.kubernetes.io/component=aggregator"
You can mount your own configuration files to the aggregators. For example, this is useful if you want to forward the logs to Elasticsearch or another service.
{{- else if and (not .Values.aggregator.enabled) (not .Values.forwarder.configMap) }}
Logs are captured on each node by the forwarder pods and sent to the standard output
Logs are captured on each node by the forwarder pods and sent to the standard output by default.
You can see all the logs by running this command:
kubectl logs -l "app.kubernetes.io/component=forwarder"
@@ -19,4 +26,5 @@
You can mount your own configuration files to the forwarders. For example, this is useful if you want to forward the logs to Elasticsearch or another service.
{{- end }}
{{- include "fluentd.validateValues" . }}
{{- include "fluentd.checkRollingTags" . -}}

View File

@@ -114,6 +114,7 @@ Validate data
*/}}
{{- define "fluentd.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "fluentd.validateValues.deployment" .) -}}
{{- $messages := append $messages (include "fluentd.validateValues.rbac" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
@@ -122,6 +123,15 @@ Validate data
{{- end -}}
{{- end -}}
{{/* Validate values of Fluentd - forwarders and aggregators can't be disabled at the same time */}}
{{- define "fluentd.validateValues.deployment" -}}
{{- if and (not .Values.forwarder.enabled) (not .Values.aggregator.enabled) -}}
fluentd:
You have disabled both the forwarders and the aggregators.
Please enable at least one of them (--set forwarder.enabled=true) (--set aggregator.enabled=true)
{{- end -}}
{{- end -}}
{{/* Validate values of Fluentd - must create serviceAccount to create enable RBAC */}}
{{- define "fluentd.validateValues.rbac" -}}
{{- if and .Values.rbac.create (not .Values.serviceAccount.create) -}}
@@ -163,3 +173,16 @@ Get the certificates secret name.
{{- printf "%s-tls" (include "fluentd.fullname" . ) -}}
{{- end -}}
{{- end -}}
{{/*
Renders a value that contains template.
Usage:
{{ include "fluentd.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "fluentd.tplValue" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}

View File

@@ -37,12 +37,14 @@ data:
@type null
</match>
# TCP input to receive logs from the forwarders
# TCP input to receive logs from
{{- if and .Values.aggregator.port }}
<source>
@type forward
bind 0.0.0.0
port {{ .Values.aggregator.port }}
</source>
{{- end }}
# HTTP input for the liveness and readiness probes
<source>

View File

@@ -19,14 +19,14 @@ spec:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/aggregator-configmap.yaml") . | sha256sum }}
{{- if .Values.aggregator.podAnnotations }}
{{- toYaml .Values.aggregator.podAnnotations | nindent 8 }}
{{- include "fluentd.tplValue" (dict "value" .Values.aggregator.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
{{- include "fluentd.imagePullSecrets" . | nindent 6 }}
{{- if .Values.securityContext.enabled }}
{{- if .Values.aggregator.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsUser: {{ .Values.aggregator.securityContext.runAsUser }}
fsGroup: {{ .Values.aggregator.securityContext.fsGroup }}
{{- end }}
containers:
- name: fluentd
@@ -38,15 +38,17 @@ spec:
- name: FLUENTD_OPT
value: {{ .Values.aggregator.extraArgs | quote }}
{{- if .Values.aggregator.extraEnv }}
{{- toYaml .Values.aggregator.extraEnv | nindent 12 }}
{{- toYaml .Values.aggregator.extraEnv | nindent 8 }}
{{- end }}
ports:
- name: http
containerPort: 9880
protocol: TCP
{{- if .Values.aggregator.port }}
- name: tcp
containerPort: {{ .Values.aggregator.port }}
protocol: TCP
{{- end }}
{{- if .Values.aggregator.containerPorts }}
{{- toYaml .Values.aggregator.containerPorts | nindent 8 }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
containerPort: {{ .Values.metrics.service.port }}

View File

@@ -0,0 +1,32 @@
{{- if .Values.aggregator.service.ports }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluentd.fullname" . }}-aggregator
labels: {{- include "fluentd.labels" . | nindent 4 }}
app.kubernetes.io/component: aggregator
{{- if .Values.aggregator.service.annotations }}
annotations: {{- include "fluentd.tplValue" (dict "value" .Values.aggregator.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.aggregator.service.type }}
{{- if and .Values.aggregator.service.loadBalancerIP (eq .Values.aggregator.service.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.aggregator.service.loadBalancerIP }}
{{- end }}
{{- if and .Values.aggregator.service.loadBalancerSourceRanges (eq .Values.aggregator.service.type "LoadBalancer") }}
loadBalancerSourceRanges:
{{- with .Values.aggregator.service.loadBalancerSourceRanges }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- if and (eq .Values.aggregator.service.type "ClusterIP") .Values.aggregator.service.clusterIP }}
clusterIP: {{ .Values.aggregator.service.clusterIP }}
{{- end }}
ports:
{{- range $key, $value := .Values.aggregator.service.ports }}
- name: {{ $key }}
{{ toYaml $value | nindent 6 }}
{{- end }}
selector: {{ include "fluentd.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: aggregator
{{- end }}

View File

@@ -1,4 +1,4 @@
{{- if not .Values.forwarder.configMap -}}
{{- if and .Values.forwarder.enabled (not .Values.forwarder.configMap) -}}
apiVersion: v1
kind: ConfigMap
metadata:

View File

@@ -1,3 +1,4 @@
{{- if .Values.forwarder.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
@@ -16,15 +17,15 @@ spec:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/forwarder-configmap.yaml") . | sha256sum }}
{{- if .Values.forwarder.podAnnotations }}
{{- toYaml .Values.forwarder.podAnnotations | nindent 8 }}
{{- include "fluentd.tplValue" (dict "value" .Values.forwarder.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
{{- include "fluentd.imagePullSecrets" . | nindent 6 }}
serviceAccountName: {{ template "fluentd.serviceAccountName" . }}
{{- if .Values.securityContext.enabled }}
{{- if .Values.forwarder.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsUser: {{ .Values.forwarder.securityContext.runAsUser }}
fsGroup: {{ .Values.forwarder.securityContext.fsGroup }}
{{- end }}
containers:
- name: fluentd
@@ -39,9 +40,9 @@ spec:
{{- toYaml .Values.forwarder.extraEnv | nindent 8 }}
{{- end }}
ports:
- name: http
containerPort: 9880
protocol: TCP
{{- if .Values.forwarder.containerPorts }}
{{- toYaml .Values.forwarder.containerPorts | nindent 8 }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
containerPort: {{ .Values.metrics.service.port }}
@@ -114,3 +115,4 @@ spec:
{{- with .Values.forwarder.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,32 @@
{{- if and .Values.forwarder.enabled .Values.forwarder.service.ports }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluentd.fullname" . }}-forwarder
labels: {{- include "fluentd.labels" . | nindent 4 }}
app.kubernetes.io/component: forwarder
{{- if .Values.forwarder.service.annotations }}
annotations: {{- include "fluentd.tplValue" (dict "value" .Values.forwarder.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.forwarder.service.type }}
{{- if and .Values.forwarder.service.loadBalancerIP (eq .Values.forwarder.service.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.forwarder.service.loadBalancerIP }}
{{- end }}
{{- if and .Values.forwarder.service.loadBalancerSourceRanges (eq .Values.forwarder.service.type "LoadBalancer") }}
loadBalancerSourceRanges:
{{- with .Values.forwarder.service.loadBalancerSourceRanges }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- if and (eq .Values.forwarder.service.type "ClusterIP") .Values.forwarder.service.clusterIP }}
clusterIP: {{ .Values.forwarder.service.clusterIP }}
{{- end }}
ports:
{{- range $key, $value := .Values.forwarder.service.ports }}
- name: {{ $key }}
{{ toYaml $value | nindent 6 }}
{{- end }}
selector: {{ include "fluentd.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: forwarder
{{- end }}

View File

@@ -2,9 +2,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluentd.fullname" . }}
name: {{ include "fluentd.fullname" . }}-metrics
labels: {{- include "fluentd.labels" . | nindent 4 }}
annotations: {{- tpl (toYaml .Values.metrics.service.annotations) $ | nindent 4 }}
annotations: {{- include "fluentd.tplValue" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.metrics.service.type }}
{{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }}

View File

@@ -1,4 +1,4 @@
{{- if .Values.aggregator.enabled -}}
{{- if and .Values.aggregator.enabled .Values.aggregator.service.ports -}}
apiVersion: v1
kind: Service
metadata:
@@ -9,9 +9,10 @@ spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp
port: {{ .Values.aggregator.port }}
targetPort: tcp
{{- range $key, $value := .Values.aggregator.service.ports }}
- name: {{ $key }}
{{ toYaml $value | nindent 6 }}
{{- end }}
selector: {{- include "fluentd.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: aggregator
{{- end -}}

View File

@@ -13,7 +13,7 @@ global: {}
image:
registry: docker.io
repository: bitnami/fluentd
tag: 1.7.4-debian-9-r0
tag: 1.7.4-debian-9-r13
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -38,6 +38,16 @@ image:
clusterDomain: cluster.local
forwarder:
enabled: true
## K8s Security Context for forwarder pods
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
runAsUser: 0
fsGroup: 0
## Name of the config file that will be used by Fluentd at launch
## Fluentd will look for it under the /opt/bitnami/fluentd/conf directory
##
@@ -59,6 +69,68 @@ forwarder:
##
extraEnv: {}
containerPorts:
- name: http
containerPort: 9880
protocol: TCP
# - name: syslog-tcp
# containerPort: 5140
# protocol: TCP
# - name: syslog-udp
# containerPort: 5140
# protocol: UDP
# - name: tcp
# containerPort: 24224
# protocol: TCP
## Service parameters
##
service:
## Service type
##
type: ClusterIP
## Service ports
##
ports:
# http:
# port: 9880
# targetPort: http
# protocol: TCP
# syslog-udp:
# port: 5140
# targetPort: syslog-udp
# protocol: UDP
# nodePort: 31514
# syslog-tcp:
# port: 5140
# targetPort: syslog-tcp
# protocol: TCP
# nodePort: 31514
# tcp:
# port: 24224
# targetPort: tcp
# protocol: TCP
## loadBalancerIP for the forwarders service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Provide any additional annotations which may be required
##
annotations: {}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
##
@@ -124,6 +196,14 @@ aggregator:
##
replicaCount: 2
## K8s Security Context for forwarder pods
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001
## Name of the config file that will be used by Fluentd at launch
## Fluentd will look for it under the /opt/bitnami/fluentd/conf directory
##
@@ -133,7 +213,8 @@ aggregator:
## If not specified, one will be created by default
# configMap:
## Port which the forwarders will connect to to send the logs
## Port the aggregator container will listen for logs. Leave it blank to ignore.
## You can specify other ports in the aggregator.containerPorts parameter
##
port: 24224
@@ -149,6 +230,53 @@ aggregator:
##
extraEnv: {}
containerPorts:
# - name: my-port
# containerPort: 24222
# protocol: TCP
- name: http
containerPort: 9880
protocol: TCP
## Service parameters
##
service:
## Service type
##
type: ClusterIP
## Service ports
##
ports:
tcp:
port: 24224
targetPort: tcp
protocol: TCP
# http:
# port: 9880
# targetPort: http
# protocol: TCP
## loadBalancerIP for the forwarders service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Provide any additional annotations which may be required
##
annotations: {}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
@@ -272,10 +400,3 @@ tls:
certificate: |-
key: |-
# existingSecret: name-of-existing-secret-to-certificates
## SecurityContext configuration
##
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001

View File

@@ -13,7 +13,7 @@ global: {}
image:
registry: docker.io
repository: bitnami/fluentd
tag: 1.7.4-debian-9-r0
tag: 1.7.4-debian-9-r13
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -38,6 +38,16 @@ image:
clusterDomain: cluster.local
forwarder:
enabled: true
## K8s Security Context for forwarder pods
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
runAsUser: 0
fsGroup: 0
## Name of the config file that will be used by Fluentd at launch
## Fluentd will look for it under the /opt/bitnami/fluentd/conf directory
##
@@ -59,6 +69,68 @@ forwarder:
##
extraEnv: {}
containerPorts:
- name: http
containerPort: 9880
protocol: TCP
# - name: syslog-tcp
# containerPort: 5140
# protocol: TCP
# - name: syslog-udp
# containerPort: 5140
# protocol: UDP
# - name: tcp
# containerPort: 24224
# protocol: TCP
## Service parameters
##
service:
## Service type
##
type: ClusterIP
## Service ports
##
ports:
# http:
# port: 9880
# targetPort: http
# protocol: TCP
# syslog-udp:
# port: 5140
# targetPort: syslog-udp
# protocol: UDP
# nodePort: 31514
# syslog-tcp:
# port: 5140
# targetPort: syslog-tcp
# protocol: TCP
# nodePort: 31514
# tcp:
# port: 24224
# targetPort: tcp
# protocol: TCP
## loadBalancerIP for the forwarders service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Provide any additional annotations which may be required
##
annotations: {}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
@@ -124,6 +196,14 @@ aggregator:
##
replicaCount: 1
## K8s Security Context for aggregator pods
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001
## Name of the config file that will be used by Fluentd at launch
## Fluentd will look for it under the /opt/bitnami/fluentd/conf directory
##
@@ -133,7 +213,8 @@ aggregator:
## If not specified, one will be created by default
# configMap:
## Port which the forwarders will connect to to send the logs
## Port the aggregator container will listen for logs. Leave it blank to ignore.
## You can specify other ports in the aggregator.containerPorts parameter
##
port: 24224
@@ -149,6 +230,53 @@ aggregator:
##
extraEnv: {}
containerPorts:
# - name: my-port
# containerPort: 24222
# protocol: TCP
- name: http
containerPort: 9880
protocol: TCP
## Service parameters
##
service:
## Service type
##
type: ClusterIP
## Service ports
##
ports:
tcp:
port: 24224
targetPort: tcp
protocol: TCP
# http:
# port: 9880
# targetPort: http
# protocol: TCP
## loadBalancerIP for the forwarders service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Provide any additional annotations which may be required
##
annotations: {}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
@@ -272,10 +400,3 @@ tls:
certificate: |-
key: |-
# existingSecret: name-of-existing-secret-to-certificates
## SecurityContext configuration
##
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: grafana
version: 1.1.1
version: 1.1.2
appVersion: 6.4.4
description: Grafana is an open source, feature rich metrics dashboard and graph editor for Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
keywords:

View File

@@ -25,7 +25,7 @@
2. Get the admin credentials:
echo "User: {{ .Values.admin.user }}"
echo "Password: $(kubectl get secret {{ include "grafana.fullname" . }}-secret -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 --decode)"
echo "Password: $(kubectl get secret {{ include "grafana.fullname" . }}-secret --namespace {{ .Release.Namespace }} -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 --decode)"
{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: harbor
version: 2.6.14
version: 3.0.0
appVersion: 1.9.1
description: Harbor is an open source trusted cloud native registry project that stores, signs, and scans content
keywords:

View File

@@ -388,6 +388,14 @@ You can enable this initContainer by setting `volumePermissions.enabled` to `tru
## Upgrade
## 3.0.0
Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
In c085d396a0515be7217d65e92f4fbd474840908b the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the API deprecations, resulting in compatibility breakage.
This major version signifies this change.
## 2.0.0
In this version, two major changes were performed:

View File

@@ -1,9 +1,9 @@
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 6.5.3
version: 7.0.0
- name: redis
repository: https://charts.bitnami.com/bitnami
version: 9.4.3
digest: sha256:99c086e6c1e8c381e164fb75d158887bf9929a2babce2f2dd6778b71e3a7820f
generated: 2019-10-29T09:56:40.58331808Z
version: 9.5.2
digest: sha256:5fdd20635c94b258d24a4d774e33d66e0b44006191d37e68ca6ad3fdcadecb14
generated: "2019-11-09T11:49:29.085223686+05:30"

View File

@@ -1,6 +1,6 @@
dependencies:
- name: postgresql
version: 6.x.x
version: 7.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: redis

View File

@@ -819,3 +819,14 @@ but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for deployment.
*/}}
{{- define "deployment.apiVersion" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}

View File

@@ -1,5 +1,5 @@
{{- if .Values.chartmuseum.enabled }}
apiVersion: apps/v1
apiVersion: {{ template "deployment.apiVersion" . }}
kind: Deployment
metadata:
name: "{{ template "harbor.chartmuseum" . }}"

View File

@@ -1,5 +1,5 @@
{{- if .Values.clair.enabled }}
apiVersion: apps/v1
apiVersion: {{ template "deployment.apiVersion" . }}
kind: Deployment
metadata:
name: "{{ template "harbor.clair" . }}"

View File

@@ -1,4 +1,4 @@
apiVersion: apps/v1
apiVersion: {{ template "deployment.apiVersion" . }}
kind: Deployment
metadata:
name: "{{ template "harbor.core" . }}"

View File

@@ -1,4 +1,4 @@
apiVersion: apps/v1
apiVersion: {{ template "deployment.apiVersion" . }}
kind: Deployment
metadata:
name: "{{ template "harbor.jobservice" . }}"

View File

@@ -1,5 +1,5 @@
{{- if ne .Values.service.type "Ingress" }}
apiVersion: apps/v1
apiVersion: {{ template "deployment.apiVersion" . }}
kind: Deployment
metadata:
name: "{{ template "harbor.nginx" . }}"

View File

@@ -1,4 +1,4 @@
apiVersion: apps/v1
apiVersion: {{ template "deployment.apiVersion" . }}
kind: Deployment
metadata:
name: "{{ template "harbor.portal" . }}"

View File

@@ -1,4 +1,4 @@
apiVersion: apps/v1
apiVersion: {{ template "deployment.apiVersion" . }}
kind: Deployment
metadata:
name: "{{ template "harbor.registry" . }}"

View File

@@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 1.7.8
appVersion: 1.7.9
description: InfluxDB is an open source time-series database designed to handle large write and read loads in real-time.
engine: gotpl
home: https://www.influxdata.com/products/influxdb-overview/

View File

@@ -148,6 +148,7 @@ spec:
containerPort: 8088
protocol: TCP
{{- if .Values.influxdb.livenessProbe.enabled }}
{{- $livenessTimeout := sub (int .Values.influxdb.livenessProbe.timeoutSeconds) 1 }}
livenessProbe:
exec:
command:
@@ -157,7 +158,7 @@ spec:
if [[ -f "${INFLUXDB_ADMIN_USER_PASSWORD_FILE:-}" ]]; then
export INFLUXDB_ADMIN_USER_PASSWORD="$(< "${INFLUXDB_ADMIN_USER_PASSWORD_FILE}")"
fi
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} timeout {{ $livenessTimeout }}s influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
initialDelaySeconds: {{ .Values.influxdb.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.influxdb.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.influxdb.livenessProbe.timeoutSeconds }}
@@ -165,6 +166,7 @@ spec:
failureThreshold: {{ .Values.influxdb.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.influxdb.readinessProbe.enabled }}
{{- $readinessTimeout := sub (int .Values.influxdb.readinessProbe.timeoutSeconds) 1 }}
readinessProbe:
exec:
command:
@@ -174,7 +176,7 @@ spec:
if [[ -f "${INFLUXDB_ADMIN_USER_PASSWORD_FILE:-}" ]]; then
export INFLUXDB_ADMIN_USER_PASSWORD="$(< "${INFLUXDB_ADMIN_USER_PASSWORD_FILE}")"
fi
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} timeout {{ $readinessTimeout }}s influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
initialDelaySeconds: {{ .Values.influxdb.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.influxdb.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.influxdb.readinessProbe.timeoutSeconds }}

View File

@@ -154,6 +154,7 @@ spec:
containerPort: 8088
protocol: TCP
{{- if .Values.influxdb.livenessProbe.enabled }}
{{- $livenessTimeout := sub (int .Values.influxdb.livenessProbe.timeoutSeconds) 1 }}
livenessProbe:
exec:
command:
@@ -163,7 +164,7 @@ spec:
if [[ -f "${INFLUXDB_ADMIN_USER_PASSWORD_FILE:-}" ]]; then
export INFLUXDB_ADMIN_USER_PASSWORD="$(< "${INFLUXDB_ADMIN_USER_PASSWORD_FILE}")"
fi
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} timeout {{ $livenessTimeout }}s influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
initialDelaySeconds: {{ .Values.influxdb.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.influxdb.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.influxdb.livenessProbe.timeoutSeconds }}
@@ -171,6 +172,7 @@ spec:
failureThreshold: {{ .Values.influxdb.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.influxdb.readinessProbe.enabled }}
{{- $readinessTimeout := sub (int .Values.influxdb.readinessProbe.timeoutSeconds) 1 }}
readinessProbe:
exec:
command:
@@ -180,7 +182,7 @@ spec:
if [[ -f "${INFLUXDB_ADMIN_USER_PASSWORD_FILE:-}" ]]; then
export INFLUXDB_ADMIN_USER_PASSWORD="$(< "${INFLUXDB_ADMIN_USER_PASSWORD_FILE}")"
fi
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
{{ if .Values.authEnabled }}INFLUX_USERNAME="$INFLUXDB_ADMIN_USER" INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"{{ end }} timeout {{ $readinessTimeout }}s influx -host 127.0.0.1 -port 8086 -execute "SHOW DATABASES"
initialDelaySeconds: {{ .Values.influxdb.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.influxdb.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.influxdb.readinessProbe.timeoutSeconds }}

View File

@@ -18,7 +18,7 @@
image:
registry: docker.io
repository: bitnami/influxdb
tag: 1.7.8-debian-9-r17
tag: 1.7.9-debian-9-r0
## Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
@@ -195,14 +195,14 @@ influxdb:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 6
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 6

View File

@@ -18,7 +18,7 @@
image:
registry: docker.io
repository: bitnami/influxdb
tag: 1.7.8-debian-9-r17
tag: 1.7.9-debian-9-r0
## Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
@@ -195,14 +195,14 @@ influxdb:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 6
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 6

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: jenkins
version: 3.4.9
version: 4.0.1
appVersion: 2.190.2
description: The leading open source automation server
keywords:

View File

@@ -150,6 +150,14 @@ See the [Parameters](#parameters) section to configure the PVC or to disable per
## Upgrading
### To 4.0.0
Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
In 4dfac075aacf74405e31ae5b27df4369e84eb0b0 the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the API deprecations, resulting in compatibility breakage.
This major version signifies this change.
### To 1.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.

View File

@@ -145,3 +145,14 @@ but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for deployment.
*/}}
{{- define "jenkins.deployment.apiVersion" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}

View File

@@ -1,4 +1,4 @@
apiVersion: apps/v1
apiVersion: {{ template "jenkins.deployment.apiVersion" . }}
kind: Deployment
metadata:
name: {{ template "jenkins.fullname" . }}

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/jenkins
tag: 2.190.2-debian-9-r1
tag: 2.190.2-debian-9-r2
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -188,7 +188,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/jenkins-exporter
tag: 0.20171225.0-debian-9-r78
tag: 0.20171225.0-debian-9-r93
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,19 +1,19 @@
apiVersion: v1
name: kafka
version: 6.1.6
version: 7.0.2
appVersion: 2.3.1
description: Apache Kafka is a distributed streaming platform.
keywords:
- kafka
- zookeeper
- streaming
- producer
- consumer
- kafka
- zookeeper
- streaming
- producer
- consumer
home: https://kafka.apache.org/
sources:
- https://github.com/bitnami/bitnami-docker-kafka
- https://github.com/bitnami/bitnami-docker-kafka
maintainers:
- name: Bitnami
email: containers@bitnami.com
- name: Bitnami
email: containers@bitnami.com
engine: gotpl
icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-110x117.png

View File

@@ -144,7 +144,7 @@ The following tables lists the configurable parameters of the Kafka chart and th
| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
| `metrics.kafka.enabled` | Whether or not to create a separate Kafka exporter | `false` |
| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` |
| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` |
| `metrics.kafka.image.repository` | Kafka exporter image name | `bitnami/kafka-exporter` |
| `metrics.kafka.image.tag` | Kafka exporter image tag | `{TAG_NAME}` |
@@ -153,7 +153,12 @@ The following tables lists the configurable parameters of the Kafka chart and th
| `metrics.kafka.interval` | Interval that Prometheus scrapes Kafka metrics when using Prometheus Operator | `10s` |
| `metrics.kafka.port` | Kafka Exporter Port which exposes metrics in Prometheus format for scraping | `9308` |
| `metrics.kafka.resources` | Allows setting resource limits for kafka-exporter pod | `{}` |
| `metrics.jmx.resources` | Allows setting resource limits for jmx sidecar container | `{}` |
| `metrics.kafka.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter | `ClusterIP` |
| `metrics.kafka.service.port` | Kafka Exporter Prometheus port | `9308` |
| `metrics.kafka.service.nodePort` | Kubernetes HTTP node port | `""` |
| `metrics.kafka.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` |
| `metrics.kafka.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `nil` |
| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` |
| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` |
| `metrics.jmx.image.repository` | JMX exporter image name | `bitnami/jmx-exporter` |
@@ -162,6 +167,13 @@ The following tables lists the configurable parameters of the Kafka chart and th
| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `metrics.jmx.interval` | Interval that Prometheus scrapes JMX metrics when using Prometheus Operator | `10s` |
| `metrics.jmx.exporterPort` | JMX Exporter Port which exposes metrics in Prometheus format for scraping | `5556` |
| `metrics.jmx.resources` | Allows setting resource limits for jmx sidecar container | `{}` |
| `metrics.jmx.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` |
| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` |
| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` |
| `metrics.jmx.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` |
| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `nil` |
| `metrics.jmx.configMap.enabled` | Enable the default ConfigMap for JMX | `true` |
| `metrics.jmx.configMap.overrideConfig` | Allows config file to be generated by passing values to ConfigMap | `{}` |
| `metrics.jmx.configMap.overrideName` | Allows setting the name of the ConfigMap to be used | `""` |
@@ -319,6 +331,16 @@ You can enable this initContainer by setting `volumePermissions.enabled` to `tru
## Upgrading
### To 7.0.0
Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments.
Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka:
```console
$ helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false
$ helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true
```
### To 2.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.

View File

@@ -1,6 +1,6 @@
dependencies:
- name: zookeeper
repository: https://charts.bitnami.com/bitnami
version: 5.0.7
- name: zookeeper
repository: https://charts.bitnami.com/bitnami
version: 5.0.7
digest: sha256:2f3c43ce02e3966648b8c89be121fe39537f62ea1d161ad908f51ddc90e4243e
generated: 2019-10-25T09:09:35.628793626Z

View File

@@ -1,5 +1,5 @@
dependencies:
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled

View File

@@ -31,6 +31,24 @@ Create chart name and version as used by the chart label.
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "kafka.labels" -}}
app.kubernetes.io/name: {{ include "kafka.name" . }}
helm.sh/chart: {{ include "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "kafka.matchLabels" -}}
app.kubernetes.io/name: {{ include "kafka.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Return the proper Kafka image name
*/}}
@@ -217,3 +235,16 @@ but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Renders a value that contains template.
Usage:
{{ include "kafka.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "kafka.tplValue" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}

View File

@@ -3,11 +3,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "kafka.fullname" . }}-configuration
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
labels: {{- include "kafka.labels" . | nindent 4 }}
data:
server.properties: |-
{{ .Values.config | indent 4 }}

View File

@@ -21,47 +21,47 @@ data:
whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
{{ end }}
rules:
- pattern: kafka.controller<type=(ControllerChannelManager), name=(QueueSize), broker-id=(\d+)><>(Value)
name: kafka_controller_$1_$2_$4
labels:
broker_id: "$3"
- pattern: kafka.controller<type=(ControllerChannelManager), name=(TotalQueueSize)><>(Value)
name: kafka_controller_$1_$2_$3
- pattern: kafka.controller<type=(KafkaController), name=(.+)><>(Value)
name: kafka_controller_$1_$2_$3
- pattern: kafka.controller<type=(ControllerStats), name=(.+)><>(Count)
name: kafka_controller_$1_$2_$3
- pattern: kafka.server<type=(ReplicaFetcherManager), name=(.+), clientId=(.+)><>(Value)
name: kafka_server_$1_$2_$4
labels:
client_id: "$3"
- pattern : kafka.network<type=(Processor), name=(IdlePercent), networkProcessor=(.+)><>(Value)
name: kafka_network_$1_$2_$4
labels:
network_processor: $3
- pattern : kafka.network<type=(RequestMetrics), name=(RequestsPerSec), request=(.+)><>(Count)
name: kafka_network_$1_$2_$4
labels:
request: $3
- pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate)
name: kafka_server_$1_$2_$4
labels:
topic: $3
- pattern: kafka.server<type=(DelayedOperationPurgatory), name=(.+), delayedOperation=(.+)><>(Value)
name: kafka_server_$1_$2_$3_$4
- pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate)
name: kafka_server_$1_total_$2_$3
- pattern: kafka.server<type=(.+)><>(queue-size)
name: kafka_server_$1_$2
- pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+)
name: java_lang_$1_$4_$3_$2
- pattern: java.lang<type=(.+), name=(.+)><>(\w+)
name: java_lang_$1_$3_$2
- pattern : java.lang<type=(.*)>
- pattern: kafka.log<type=(.+), name=(.+), topic=(.+), partition=(.+)><>Value
name: kafka_log_$1_$2
labels:
topic: $3
partition: $4
- pattern: kafka.controller<type=(ControllerChannelManager), name=(QueueSize), broker-id=(\d+)><>(Value)
name: kafka_controller_$1_$2_$4
labels:
broker_id: "$3"
- pattern: kafka.controller<type=(ControllerChannelManager), name=(TotalQueueSize)><>(Value)
name: kafka_controller_$1_$2_$3
- pattern: kafka.controller<type=(KafkaController), name=(.+)><>(Value)
name: kafka_controller_$1_$2_$3
- pattern: kafka.controller<type=(ControllerStats), name=(.+)><>(Count)
name: kafka_controller_$1_$2_$3
- pattern: kafka.server<type=(ReplicaFetcherManager), name=(.+), clientId=(.+)><>(Value)
name: kafka_server_$1_$2_$4
labels:
client_id: "$3"
- pattern : kafka.network<type=(Processor), name=(IdlePercent), networkProcessor=(.+)><>(Value)
name: kafka_network_$1_$2_$4
labels:
network_processor: $3
- pattern : kafka.network<type=(RequestMetrics), name=(RequestsPerSec), request=(.+)><>(Count)
name: kafka_network_$1_$2_$4
labels:
request: $3
- pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate)
name: kafka_server_$1_$2_$4
labels:
topic: $3
- pattern: kafka.server<type=(DelayedOperationPurgatory), name=(.+), delayedOperation=(.+)><>(Value)
name: kafka_server_$1_$2_$3_$4
- pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate)
name: kafka_server_$1_total_$2_$3
- pattern: kafka.server<type=(.+)><>(queue-size)
name: kafka_server_$1_$2
- pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+)
name: java_lang_$1_$4_$3_$2
- pattern: java.lang<type=(.+), name=(.+)><>(\w+)
name: java_lang_$1_$3_$2
- pattern : java.lang<type=(.*)>
- pattern: kafka.log<type=(.+), name=(.+), topic=(.+), partition=(.+)><>Value
name: kafka_log_$1_$2
labels:
topic: $3
partition: $4
{{- end }}
{{- end -}}

View File

@@ -3,38 +3,29 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "kafka.fullname" . }}-exporter
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: metrics
spec:
replicas: 1
selector:
matchLabels:
app: {{ template "kafka.name" . }}
release: "{{ .Release.Name }}"
matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: metrics
template:
metadata:
annotations:
{{- if and .Values.metrics.kafka.enabled }}
prometheus.io/scrape: "true"
prometheus.io/port: {{ .Values.metrics.kafka.port | quote }}
{{- end }}
labels:
app: {{ template "kafka.name" . }}
release: "{{ .Release.Name }}"
labels: {{- include "kafka.labels" . | nindent 8 }}
app.kubernetes.io/component: metrics
spec:
{{- include "kafka.imagePullSecrets" . | indent 6 }}
containers:
- image: {{ template "kafka.metrics.kafka.image" . }}
name: kafka-exporter
args:
- --kafka.server={{ template "kafka.fullname" . }}:{{ .Values.service.port }}
- --web.listen-address=:{{ .Values.metrics.kafka.port }}
ports:
- containerPort: {{ .Values.metrics.kafka.port }}
resources:
{{ toYaml .Values.metrics.kafka.resources | indent 10 }}
- image: {{ template "kafka.metrics.kafka.image" . }}
name: kafka-exporter
args:
- --kafka.server={{ template "kafka.fullname" . }}:{{ .Values.service.port }}
- --web.listen-address=:{{ .Values.metrics.kafka.port }}
ports:
- name: metrics
containerPort: {{ .Values.metrics.kafka.port }}
{{- if .Values.metrics.kafka.resources }}
resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- if .Values.metrics.jmx.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kafka.fullname" . }}-jmx-metrics
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: kafka
annotations: {{ include "kafka.tplValue" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.metrics.jmx.service.type }}
{{- if and .Values.metrics.jmx.service.loadBalancerIP (eq .Values.metrics.jmx.service.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.metrics.jmx.service.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.metrics.jmx.service.type "ClusterIP") .Values.metrics.jmx.service.clusterIP }}
clusterIP: {{ .Values.metrics.jmx.service.clusterIP }}
{{- end }}
ports:
- name: metrics
port: {{ .Values.metrics.jmx.service.port }}
targetPort: metrics
{{- if (and (or (eq .Values.metrics.jmx.service.type "NodePort") (eq .Values.metrics.jmx.service.type "LoadBalancer")) (not (empty .Values.metrics.jmx.service.nodePort)))}}
nodePort: {{ .Values.metrics.jmx.service.nodePort }}
{{- else if eq .Values.metrics.jmx.service.type "ClusterIP" }}
nodePort: null
{{- end }}
selector: {{- include "kafka.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: kafka
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- if .Values.metrics.kafka.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kafka.fullname" . }}-metrics
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: metrics
annotations: {{ include "kafka.tplValue" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.metrics.kafka.service.type }}
{{- if and .Values.metrics.kafka.service.loadBalancerIP (eq .Values.metrics.kafka.service.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.metrics.kafka.service.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.metrics.kafka.service.type "ClusterIP") .Values.metrics.kafka.service.clusterIP }}
clusterIP: {{ .Values.metrics.kafka.service.clusterIP }}
{{- end }}
ports:
- name: metrics
port: {{ .Values.metrics.kafka.service.port }}
targetPort: metrics
{{- if (and (or (eq .Values.metrics.kafka.service.type "NodePort") (eq .Values.metrics.kafka.service.type "LoadBalancer")) (not (empty .Values.metrics.kafka.service.nodePort)))}}
nodePort: {{ .Values.metrics.kafka.service.nodePort }}
{{- else if eq .Values.metrics.kafka.service.type "ClusterIP" }}
nodePort: null
{{- end }}
selector: {{- include "kafka.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: metrics
{{- end }}

View File

@@ -4,17 +4,11 @@ apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "kafka.fullname" . }}
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: kafka
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: kafka
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end }}

View File

@@ -3,11 +3,7 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ template "kafka.fullname" . }}
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
labels: {{- include "kafka.labels" . | nindent 4 }}
type: Opaque
data:
{{- if .Values.auth.brokerPassword }}

View File

@@ -0,0 +1,30 @@
{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kafka.fullname" . }}-jmx-metrics
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- end }}
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: kafka
{{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
selector:
matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: kafka
endpoints:
- port: metrics
path: "/"
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,30 @@
{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kafka.fullname" . }}-metrics
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- end }}
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: metrics
{{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
selector:
matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: metrics
endpoints:
- port: metrics
path: "/metrics"
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,46 +0,0 @@
{{- if and (or .Values.metrics.kafka.enabled .Values.metrics.jmx.enabled) .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kafka.fullname" . }}
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- end }}
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: kafka
{{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: kafka
endpoints:
{{- if .Values.metrics.kafka.enabled }}
- port: {{ .Values.metrics.kafka.exporterPort }}
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- end }}
{{- if .Values.metrics.jmx.enabled }}
- port: {{ .Values.metrics.jmx.exporterPort }}
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,18 +1,12 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: "{{ template "kafka.fullname" . }}"
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
name: {{ include "kafka.fullname" . }}
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: kafka
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: kafka
serviceName: {{ template "kafka.fullname" . }}-headless
podManagementPolicy: "Parallel"
@@ -27,17 +21,7 @@ spec:
{{- end }}
template:
metadata:
{{ if .Values.metrics.jmx.enabled }}
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: {{ .Values.metrics.jmx.exporterPort | quote }}
{{- end }}
name: "{{ template "kafka.fullname" . }}"
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
labels: {{- include "kafka.labels" . | nindent 8 }}
app.kubernetes.io/component: kafka
spec:
{{- include "kafka.imagePullSecrets" . | indent 6 }}
@@ -46,283 +30,267 @@ spec:
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsUser: {{ .Values.securityContext.runAsUser }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
nodeSelector: {{- include "kafka.tplValue" ( dict "value" .Values.nodeSelector "context" $ ) | nindent 8 }}
tolerations: {{- include "kafka.tplValue" ( dict "value" .Values.tolerations "context" $ ) | nindent 8 }}
affinity: {{- include "kafka.tplValue" ( dict "value" .Values.affinity "context" $ ) | nindent 8 }}
{{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
initContainers:
- name: volume-permissions
image: "{{ template "kafka.volumePermissions.image" . }}"
imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "/bitnami/kafka"]
securityContext:
runAsUser: 0
resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }}
volumeMounts:
- name: data
mountPath: "/bitnami/kafka"
- name: volume-permissions
image: {{ include "kafka.volumePermissions.image" . }}
imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "/bitnami/kafka"]
securityContext:
runAsUser: 0
{{- if .Values.volumePermissions.resources }}
resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: "/bitnami/kafka"
{{- end }}
containers:
- name: kafka
image: "{{ template "kafka.image" . }}"
imagePullPolicy: "{{ .Values.image.pullPolicy }}"
resources:
{{ toYaml .Values.resources | indent 10 }}
env:
{{- if .Values.image.debug }}
- name: BASH_DEBUG
value: "1"
- name: NAMI_DEBUG
value: "1"
- name: NAMI_LOG_LEVEL
value: "trace8"
{{- end }}
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KAFKA_CFG_ZOOKEEPER_CONNECT
{{- if .Values.zookeeper.enabled }}
value: {{ template "kafka.zookeeper.fullname" . }}
{{- else }}
value: {{ .Values.externalZookeeper.servers | quote }}
{{- end }}
- name: KAFKA_PORT_NUMBER
value: {{ .Values.service.port | quote }}
- name: KAFKA_CFG_LISTENERS
{{- if .Values.listeners }}
value: {{ .Values.listeners }}
{{- else if and .Values.auth.ssl .Values.auth.enabled }}
value: "SASL_SSL://:$(KAFKA_PORT_NUMBER),SSL://:9093"
{{- else if .Values.auth.enabled }}
value: "SASL_SSL://:$(KAFKA_PORT_NUMBER)"
{{- else }}
value: "PLAINTEXT://:$(KAFKA_PORT_NUMBER)"
{{- end }}
- name: KAFKA_CFG_ADVERTISED_LISTENERS
{{- if .Values.advertisedListeners }}
value: {{ .Values.advertisedListeners }}
{{- else if and .Values.auth.ssl .Values.auth.enabled }}
value: 'SASL_SSL://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:$(KAFKA_PORT_NUMBER),SSL://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:9093'
{{- else if .Values.auth.enabled }}
value: 'SASL_SSL://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:$(KAFKA_PORT_NUMBER)'
{{- else }}
value: 'PLAINTEXT://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:$(KAFKA_PORT_NUMBER)'
{{- end }}
{{- if .Values.listenerSecurityProtocolMap }}
- name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
value: {{ .Values.listenerSecurityProtocolMap }}
{{- end }}
{{- if .Values.interBrokerListenerName }}
- name: KAFKA_INTER_BROKER_LISTENER_NAME
value: {{ .Values.interBrokerListenerName }}
{{- end }}
{{- if .Values.metrics.jmx.enabled }}
- name: JMX_PORT
value: {{ .Values.metrics.jmx.jmxPort | quote }}
{{- end }}
{{- if .Values.auth.enabled }}
- name: KAFKA_OPTS
value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/conf/kafka_jaas.conf"
- name: KAFKA_BROKER_USER
value: {{ .Values.auth.brokerUser | quote }}
- name: KAFKA_BROKER_PASSWORD
valueFrom:
secretKeyRef:
name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "kafka.fullname" . }}{{ end }}
key: kafka-broker-password
- name: KAFKA_INTER_BROKER_USER
value: {{ .Values.auth.interBrokerUser | quote }}
- name: KAFKA_INTER_BROKER_PASSWORD
valueFrom:
secretKeyRef:
name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "kafka.fullname" . }}{{ end }}
key: kafka-inter-broker-password
{{- if .Values.auth.zookeeperUser }}
- name: KAFKA_ZOOKEEPER_USER
value: {{ .Values.auth.zookeeperUser | quote }}
{{- end }}
{{- if .Values.auth.zookeeperPassword }}
- name: KAFKA_ZOOKEEPER_PASSWORD
valueFrom:
secretKeyRef:
name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "kafka.fullname" . }}{{ end }}
key: kafka-zookeeper-password
{{- end }}
{{- end }}
{{- if .Values.auth.certificatesPassword }}
- name: KAFKA_CERTIFICATE_PASSWORD
value: {{ .Values.auth.certificatesPassword | quote }}
{{- end }}
- name: ALLOW_PLAINTEXT_LISTENER
{{- if .Values.auth.enabled }}
value: "no"
{{- else if .Values.allowPlaintextListener }}
value: "yes"
{{- else }}
value: "no"
{{- end }}
- name: KAFKA_CFG_BROKER_ID
value: {{ .Values.brokerId | quote }}
- name: KAFKA_CFG_DELETE_TOPIC_ENABLE
value: {{ .Values.deleteTopicEnable | quote }}
- name: KAFKA_HEAP_OPTS
value: {{ .Values.heapOpts | quote }}
- name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES
value: {{ .Values.logFlushIntervalMessages | quote }}
- name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS
value: {{ .Values.logFlushIntervalMs | quote }}
- name: KAFKA_CFG_LOG_RETENTION_BYTES
value: {{ .Values.logRetentionBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS
value: {{ .Values.logRetentionCheckIntervalMs | quote }}
- name: KAFKA_CFG_LOG_RETENTION_HOURS
value: {{ .Values.logRetentionHours | quote }}
{{- if .Values.logMessageFormatVersion }}
- name: KAFKA_CFG_LOG_MESSAGE_FORMAT_VERSION
value: {{ .Values.logMessageFormatVersion | quote }}
{{- end }}
- name: KAFKA_CFG_MESSAGE_MAX_BYTES
value: {{ .Values.maxMessageBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_LOG_SEGMENT_BYTES
value: {{ .Values.logSegmentBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_LOG_DIRS
value: {{ .Values.logsDirs }}
- name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR
value: {{ .Values.defaultReplicationFactor | quote }}
- name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR
value: {{ .Values.offsetsTopicReplicationFactor | quote }}
- name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR
value: {{ .Values.transactionStateLogReplicationFactor | quote }}
- name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM
value: {{ .Values.sslEndpointIdentificationAlgorithm | quote }}
- name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR
value: {{ .Values.transactionStateLogMinIsr | quote }}
- name: KAFKA_CFG_NUM_IO_THREADS
value: {{ .Values.numIoThreads | quote }}
- name: KAFKA_CFG_NUM_NETWORK_THREADS
value: {{ .Values.numNetworkThreads | quote }}
- name: KAFKA_CFG_NUM_PARTITIONS
value: {{ .Values.numPartitions | quote }}
- name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR
value: {{ .Values.numRecoveryThreadsPerDataDir | quote }}
- name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES
value: {{ .Values.socketReceiveBufferBytes | quote }}
- name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES
value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES
value: {{ .Values.socketSendBufferBytes | quote }}
- name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS
value: {{ .Values.zookeeperConnectionTimeoutMs | quote }}
{{- if .Values.extraEnvVars }}
{{ toYaml .Values.extraEnvVars | nindent 8 }}
{{- end }}
ports:
- name: kafka
containerPort: {{ .Values.service.port }}
{{- if .Values.auth.ssl }}
- name: kafka-ssl
containerPort: 9093
{{- end }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: kafka
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: kafka
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
{{- if .Values.persistence.enabled }}
- name: data
mountPath: /bitnami/kafka
image: {{ include "kafka.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.resources }}
resources: {{ toYaml .Values.resources | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" .Values.image.debug | quote }}
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KAFKA_CFG_ZOOKEEPER_CONNECT
{{- if .Values.zookeeper.enabled }}
value: {{ template "kafka.zookeeper.fullname" . }}
{{- else }}
value: {{ .Values.externalZookeeper.servers | quote }}
{{- end }}
- name: KAFKA_PORT_NUMBER
value: {{ .Values.service.port | quote }}
- name: KAFKA_CFG_LISTENERS
{{- if .Values.listeners }}
value: {{ .Values.listeners }}
{{- else if and .Values.auth.ssl .Values.auth.enabled }}
value: "SASL_SSL://:$(KAFKA_PORT_NUMBER),SSL://:9093"
{{- else if .Values.auth.enabled }}
value: "SASL_SSL://:$(KAFKA_PORT_NUMBER)"
{{- else }}
value: "PLAINTEXT://:$(KAFKA_PORT_NUMBER)"
{{- end }}
- name: KAFKA_CFG_ADVERTISED_LISTENERS
{{- if .Values.advertisedListeners }}
value: {{ .Values.advertisedListeners }}
{{- else if and .Values.auth.ssl .Values.auth.enabled }}
value: 'SASL_SSL://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:$(KAFKA_PORT_NUMBER),SSL://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:9093'
{{- else if .Values.auth.enabled }}
value: 'SASL_SSL://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:$(KAFKA_PORT_NUMBER)'
{{- else }}
value: 'PLAINTEXT://$(MY_POD_NAME).{{ template "kafka.fullname" . }}-headless.{{.Release.Namespace}}.svc.{{ .Values.clusterDomain }}:$(KAFKA_PORT_NUMBER)'
{{- end }}
{{- if .Values.listenerSecurityProtocolMap }}
- name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
value: {{ .Values.listenerSecurityProtocolMap }}
{{- end }}
{{- if .Values.interBrokerListenerName }}
- name: KAFKA_INTER_BROKER_LISTENER_NAME
value: {{ .Values.interBrokerListenerName }}
{{- end }}
{{- if .Values.metrics.jmx.enabled }}
- name: JMX_PORT
value: {{ .Values.metrics.jmx.jmxPort | quote }}
{{- end }}
{{- if .Values.auth.enabled }}
- name: KAFKA_OPTS
value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/conf/kafka_jaas.conf"
- name: KAFKA_BROKER_USER
value: {{ .Values.auth.brokerUser | quote }}
- name: KAFKA_BROKER_PASSWORD
valueFrom:
secretKeyRef:
name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "kafka.fullname" . }}{{ end }}
key: kafka-broker-password
- name: KAFKA_INTER_BROKER_USER
value: {{ .Values.auth.interBrokerUser | quote }}
- name: KAFKA_INTER_BROKER_PASSWORD
valueFrom:
secretKeyRef:
name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "kafka.fullname" . }}{{ end }}
key: kafka-inter-broker-password
{{- if .Values.auth.zookeeperUser }}
- name: KAFKA_ZOOKEEPER_USER
value: {{ .Values.auth.zookeeperUser | quote }}
{{- end }}
{{- if .Values.auth.zookeeperPassword }}
- name: KAFKA_ZOOKEEPER_PASSWORD
valueFrom:
secretKeyRef:
name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "kafka.fullname" . }}{{ end }}
key: kafka-zookeeper-password
{{- end }}
{{- end }}
{{- if .Values.auth.certificatesPassword }}
- name: KAFKA_CERTIFICATE_PASSWORD
value: {{ .Values.auth.certificatesPassword | quote }}
{{- end }}
- name: ALLOW_PLAINTEXT_LISTENER
value: {{ ternary "yes" "no" (or .Values.auth.enabled .Values.allowPlaintextListener) | quote }}
- name: KAFKA_CFG_BROKER_ID
value: {{ .Values.brokerId | quote }}
- name: KAFKA_CFG_DELETE_TOPIC_ENABLE
value: {{ .Values.deleteTopicEnable | quote }}
- name: KAFKA_HEAP_OPTS
value: {{ .Values.heapOpts | quote }}
- name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES
value: {{ .Values.logFlushIntervalMessages | quote }}
- name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS
value: {{ .Values.logFlushIntervalMs | quote }}
- name: KAFKA_CFG_LOG_RETENTION_BYTES
value: {{ .Values.logRetentionBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS
value: {{ .Values.logRetentionCheckIntervalMs | quote }}
- name: KAFKA_CFG_LOG_RETENTION_HOURS
value: {{ .Values.logRetentionHours | quote }}
{{- if .Values.logMessageFormatVersion }}
- name: KAFKA_CFG_LOG_MESSAGE_FORMAT_VERSION
value: {{ .Values.logMessageFormatVersion | quote }}
{{- end }}
- name: KAFKA_CFG_MESSAGE_MAX_BYTES
value: {{ .Values.maxMessageBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_LOG_SEGMENT_BYTES
value: {{ .Values.logSegmentBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_LOG_DIRS
value: {{ .Values.logsDirs }}
- name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR
value: {{ .Values.defaultReplicationFactor | quote }}
- name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR
value: {{ .Values.offsetsTopicReplicationFactor | quote }}
- name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR
value: {{ .Values.transactionStateLogReplicationFactor | quote }}
- name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM
value: {{ .Values.sslEndpointIdentificationAlgorithm | quote }}
- name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR
value: {{ .Values.transactionStateLogMinIsr | quote }}
- name: KAFKA_CFG_NUM_IO_THREADS
value: {{ .Values.numIoThreads | quote }}
- name: KAFKA_CFG_NUM_NETWORK_THREADS
value: {{ .Values.numNetworkThreads | quote }}
- name: KAFKA_CFG_NUM_PARTITIONS
value: {{ .Values.numPartitions | quote }}
- name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR
value: {{ .Values.numRecoveryThreadsPerDataDir | quote }}
- name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES
value: {{ .Values.socketReceiveBufferBytes | quote }}
- name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES
value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }}
- name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES
value: {{ .Values.socketSendBufferBytes | quote }}
- name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS
value: {{ .Values.zookeeperConnectionTimeoutMs | quote }}
{{- if .Values.extraEnvVars }}
{{ include "kafka.tplValue" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
ports:
- name: kafka
containerPort: {{ .Values.service.port }}
{{- if .Values.auth.ssl }}
- name: kafka-ssl
containerPort: 9093
{{- end }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: kafka
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: kafka
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
{{- if .Values.persistence.enabled }}
- name: data
mountPath: /bitnami/kafka
{{- end }}
{{- if .Values.config }}
- name: kafka-config
mountPath: /opt/bitnami/kafka/conf/server.properties
subPath: server.properties
{{- end }}
{{- if .Values.auth.enabled }}
- name: kafka-certificates
mountPath: /opt/bitnami/kafka/conf/certs/
readOnly: true
{{- end }}
{{ if .Values.metrics.jmx.enabled }}
- name: jmx-exporter
image: "{{ template "kafka.metrics.jmx.image" . }}"
imagePullPolicy: "{{ .Values.metrics.jmx.image.pullPolicy }}"
command:
- java
- -XX:+UnlockExperimentalVMOptions
- -XX:+UseCGroupMemoryLimitForHeap
- -XX:MaxRAMFraction=1
- -XshowSettings:vm
- -jar
- jmx_prometheus_httpserver.jar
- {{ .Values.metrics.jmx.exporterPort | quote }}
- /etc/jmx-kafka/jmx-kafka-prometheus.yml
ports:
- name: metrics
containerPort: {{ .Values.metrics.jmx.exporterPort }}
{{- if .Values.metrics.jmx.resources }}
resources: {{ toYaml .Values.metrics.jmx.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: jmx-config
mountPath: /etc/jmx-kafka
{{ end }}
volumes:
{{ if .Values.metrics.jmx.enabled }}
- name: jmx-config
configMap:
{{- if .Values.metrics.jmx.configMap.overrideName }}
name: {{ .Values.metrics.jmx.configMap.overrideName }}
{{- else }}
name: {{ template "kafka.fullname" . }}-jmx-configuration
{{- end }}
{{ end }}
{{ if .Values.config }}
- name: kafka-config
mountPath: /opt/bitnami/kafka/conf/server.properties
subPath: server.properties
configMap:
name: {{ template "kafka.fullname" . }}-configuration
{{ end }}
{{ if .Values.auth.enabled }}
- name: kafka-certificates
mountPath: /opt/bitnami/kafka/conf/certs/
readOnly: true
secret:
secretName: {{ required "A secret containinig the Kafka JKS certificates is required when authentication in enabled" .Values.auth.certificatesSecret }}
defaultMode: 256
{{ end }}
{{ if .Values.metrics.jmx.enabled }}
- name: jmx-exporter
image: "{{ template "kafka.metrics.jmx.image" . }}"
imagePullPolicy: "{{ .Values.metrics.jmx.image.pullPolicy }}"
command:
- java
- -XX:+UnlockExperimentalVMOptions
- -XX:+UseCGroupMemoryLimitForHeap
- -XX:MaxRAMFraction=1
- -XshowSettings:vm
- -jar
- jmx_prometheus_httpserver.jar
- {{ .Values.metrics.jmx.exporterPort | quote }}
- /etc/jmx-kafka/jmx-kafka-prometheus.yml
ports:
- containerPort: {{ .Values.metrics.jmx.exporterPort }}
resources:
{{ toYaml .Values.metrics.jmx.resources | indent 10 }}
volumeMounts:
- name: jmx-config
mountPath: /etc/jmx-kafka
{{ end }}
volumes:
{{ if .Values.metrics.jmx.enabled }}
- name: jmx-config
configMap:
{{- if .Values.metrics.jmx.configMap.overrideName }}
name: {{ .Values.metrics.jmx.configMap.overrideName }}
{{- else }}
name: {{ template "kafka.fullname" . }}-jmx-configuration
{{- end }}
{{ end }}
{{ if .Values.config }}
- name: kafka-config
configMap:
name: {{ template "kafka.fullname" . }}-configuration
{{ end }}
{{ if .Values.auth.enabled }}
- name: kafka-certificates
secret:
secretName: {{ required "A secret containinig the Kafka JKS certificates is required when authentication in enabled" .Values.auth.certificatesSecret }}
defaultMode: 256
{{ end }}
{{- if not .Values.persistence.enabled }}
- name: data
emptyDir: {}
- name: data
emptyDir: {}
{{- else if .Values.persistence.existingClaim }}
- name: data
persistentVolumeClaim:
- name: data
persistentVolumeClaim:
{{- with .Values.persistence.existingClaim }}
claimName: {{ tpl . $ }}
claimName: {{ tpl . $ }}
{{- end }}
{{- else }}
volumeClaimTemplates:
@@ -342,5 +310,5 @@ spec:
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{ include "kafka.storageClass" . }}
{{ include "kafka.storageClass" . | nindent 8 }}
{{- end }}

View File

@@ -2,25 +2,19 @@ apiVersion: v1
kind: Service
metadata:
name: {{ template "kafka.fullname" . }}-headless
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: kafka
spec:
type: ClusterIP
clusterIP: None
ports:
- name: kafka
port: {{ .Values.service.port }}
targetPort: kafka
{{- if .Values.auth.ssl }}
- name: kafka-ssl
port: 9093
targetPort: kafka-ssl
{{- end }}
selector:
app.kubernetes.io/name: {{ template "kafka.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
- name: kafka
port: {{ .Values.service.port }}
targetPort: kafka
{{- if .Values.auth.ssl }}
- name: kafka-ssl
port: 9093
targetPort: kafka-ssl
{{- end }}
selector: {{- include "kafka.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: kafka

View File

@@ -2,16 +2,9 @@ apiVersion: v1
kind: Service
metadata:
name: {{ template "kafka.fullname" . }}
labels:
app.kubernetes.io/name: {{ template "kafka.name" . }}
helm.sh/chart: {{ template "kafka.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
labels: {{- include "kafka.labels" . | nindent 4 }}
app.kubernetes.io/component: kafka
annotations:
{{- range $key, $value := .Values.service.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
annotations: {{ include "kafka.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if eq .Values.service.type "LoadBalancer" }}
@@ -20,18 +13,16 @@ spec:
{{- end }}
{{- end }}
ports:
- name: kafka
port: {{ .Values.service.port }}
{{- if and .Values.service.nodePort (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }}
nodePort: {{ .Values.service.nodePort }}
- name: kafka
port: {{ .Values.service.port }}
{{- if and .Values.service.nodePort (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
targetPort: kafka
{{- if .Values.auth.ssl }}
- name: kafka-ssl
port: 9093
targetPort: kafka-ssl
{{- end }}
targetPort: kafka
{{- if .Values.auth.ssl }}
- name: kafka-ssl
port: 9093
targetPort: kafka-ssl
{{- end }}
selector:
app.kubernetes.io/name: {{ template "kafka.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
selector: {{- include "kafka.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: kafka

View File

@@ -205,9 +205,10 @@ sslEndpointIdentificationAlgorithm: https
auth:
## Switch to enable the kafka authentication.
enabled: true
##Enable SSL to be used with brokers and consumers
## Enable SSL to be used with brokers and consumers
#ssl: false
## Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser.
#existingSecret:
@@ -262,7 +263,7 @@ service:
# loadBalancerIP:
## Service annotations done as key:value pairs
annotations:
annotations: {}
## Kafka data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
@@ -336,6 +337,9 @@ metrics:
kafka:
enabled: true
## Bitnami Kafka exporter image
## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
##
image:
registry: docker.io
repository: bitnami/kafka-exporter
@@ -354,19 +358,55 @@ metrics:
## Port kafka-exporter exposes for Prometheus to scrape metrics
port: 9308
## Resource limits
resources: {}
# limits:
# cpu: 200m
# memory: 1Gi
# requests:
# cpu: 100m
# memory: 100Mi
## Prometheus Kafka Exporter' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 100m
# memory: 128Mi
requests: {}
# cpu: 100m
# memory: 128Mi
service:
## Kafka Exporter Service type
##
type: ClusterIP
## Kafka Exporter Prometheus port
##
port: 9308
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Annotations for the Kafka Exporter Prometheus metrics service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.kafka.port }}"
prometheus.io/path: "/metrics"
## Prometheus JMX Exporter: exposes the majority of Kafkas metrics
##
jmx:
enabled: true
## Bitnami JMX exporter image
## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/
##
image:
registry: docker.io
repository: bitnami/jmx-exporter
@@ -380,18 +420,53 @@ metrics:
# - myRegistryKeySecretName
## Interval at which Prometheus scrapes metrics, note: only used by Prometheus Operator
##
interval: 10s
## Port jmx-exporter exposes Prometheus format metrics to scrape
##
exporterPort: 5556
resources: {}
# limits:
# cpu: 200m
# memory: 1Gi
# requests:
## Prometheus JMX Exporter' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 100m
# memory: 100Mi
# memory: 128Mi
requests: {}
# cpu: 100m
# memory: 128Mi
service:
## JMX Exporter Service type
##
type: ClusterIP
## JMX Exporter Prometheus port
##
port: 5556
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Annotations for the JMX Exporter Prometheus metrics service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.jmx.exporterPort }}"
prometheus.io/path: "/"
## Credits to the incubator/kafka chart for the JMX configuration.
## https://github.com/helm/charts/tree/master/incubator/kafka
@@ -400,12 +475,15 @@ metrics:
## there are still more stats to clean up and expose, others will never get exposed. They keep lots of duplicates
## that can be derived easily. The configMap in this chart cleans up the metrics it exposes to be in a Prometheus
## format, eg topic, broker are labels and not part of metric name. Improvements are gladly accepted and encouraged.
##
configMap:
## Allows disabling the default configmap, note a configMap is needed
##
enabled: true
## Allows setting values to generate confimap
## To allow all metrics through (warning its crazy excessive) comment out below `overrideConfig` and set
## `whitelistObjectNames: []`
##
overrideConfig: {}
# jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
# lowercaseOutputName: true
@@ -415,31 +493,45 @@ metrics:
# - pattern: ".*"
## If you would like to supply your own ConfigMap for JMX metrics, supply the name of that
## ConfigMap as an `overrideName` here.
##
overrideName: ""
## Port the jmx metrics are exposed in native jmx format, not in Prometheus format
##
jmxPort: 5555
## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted
## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics
## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []`
## (2) commented out above `overrideConfig`.
##
whitelistObjectNames: # []
- kafka.controller:*
- kafka.server:*
- java.lang:*
- kafka.network:*
- kafka.log:*
- kafka.controller:*
- kafka.server:*
- java.lang:*
- kafka.network:*
- kafka.log:*
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
namespace: monitoring
# fallback to the prometheus default unless specified
## Namespace in which Prometheus is running
##
# namespace: monitoring
## Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# interval: 10s
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
selector:
prometheus: kube-prometheus
## Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# scrapeTimeout: 10s
## ServiceMonitor selector labels
## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
##
# selector:
# prometheus: my-prometheus
##
## Zookeeper chart configuration

View File

@@ -206,7 +206,7 @@ auth:
## Switch to enable the kafka authentication.
enabled: false
##Enable SSL to be used with brokers and consumers
## Enable SSL to be used with brokers and consumers
#ssl: false
## Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser.
@@ -263,7 +263,7 @@ service:
# loadBalancerIP:
## Service annotations done as key:value pairs
annotations:
annotations: {}
## Kafka data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
@@ -337,6 +337,9 @@ metrics:
kafka:
enabled: false
## Bitnami Kafka exporter image
## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
##
image:
registry: docker.io
repository: bitnami/kafka-exporter
@@ -355,19 +358,55 @@ metrics:
## Port kafka-exporter exposes for Prometheus to scrape metrics
port: 9308
## Resource limits
resources: {}
# limits:
# cpu: 200m
# memory: 1Gi
# requests:
# cpu: 100m
# memory: 100Mi
## Prometheus Kafka Exporter' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 100m
# memory: 128Mi
requests: {}
# cpu: 100m
# memory: 128Mi
service:
## Kafka Exporter Service type
##
type: ClusterIP
## Kafka Exporter Prometheus port
##
port: 9308
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Annotations for the Kafka Exporter Prometheus metrics service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.kafka.port }}"
prometheus.io/path: "/metrics"
## Prometheus JMX Exporter: exposes the majority of Kafkas metrics
##
jmx:
enabled: false
## Bitnami JMX exporter image
## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/
##
image:
registry: docker.io
repository: bitnami/jmx-exporter
@@ -381,18 +420,53 @@ metrics:
# - myRegistryKeySecretName
## Interval at which Prometheus scrapes metrics, note: only used by Prometheus Operator
##
interval: 10s
## Port jmx-exporter exposes Prometheus format metrics to scrape
##
exporterPort: 5556
resources: {}
# limits:
# cpu: 200m
# memory: 1Gi
# requests:
## Prometheus JMX Exporter' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 100m
# memory: 100Mi
# memory: 128Mi
requests: {}
# cpu: 100m
# memory: 128Mi
service:
## JMX Exporter Service type
##
type: ClusterIP
## JMX Exporter Prometheus port
##
port: 5556
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Set the Cluster IP to use
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
# clusterIP: None
## Annotations for the JMX Exporter Prometheus metrics service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.jmx.exporterPort }}"
prometheus.io/path: "/"
## Credits to the incubator/kafka chart for the JMX configuration.
## https://github.com/helm/charts/tree/master/incubator/kafka
@@ -401,12 +475,15 @@ metrics:
## there are still more stats to clean up and expose, others will never get exposed. They keep lots of duplicates
## that can be derived easily. The configMap in this chart cleans up the metrics it exposes to be in a Prometheus
## format, eg topic, broker are labels and not part of metric name. Improvements are gladly accepted and encouraged.
##
configMap:
## Allows disabling the default configmap, note a configMap is needed
##
enabled: true
## Allows setting values to generate confimap
## To allow all metrics through (warning its crazy excessive) comment out below `overrideConfig` and set
## `whitelistObjectNames: []`
##
overrideConfig: {}
# jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
# lowercaseOutputName: true
@@ -416,31 +493,45 @@ metrics:
# - pattern: ".*"
## If you would like to supply your own ConfigMap for JMX metrics, supply the name of that
## ConfigMap as an `overrideName` here.
##
overrideName: ""
## Port the jmx metrics are exposed in native jmx format, not in Prometheus format
##
jmxPort: 5555
## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted
## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics
## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []`
## (2) commented out above `overrideConfig`.
##
whitelistObjectNames: # []
- kafka.controller:*
- kafka.server:*
- java.lang:*
- kafka.network:*
- kafka.log:*
- kafka.controller:*
- kafka.server:*
- java.lang:*
- kafka.network:*
- kafka.log:*
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
namespace: monitoring
# fallback to the prometheus default unless specified
## Namespace in which Prometheus is running
##
# namespace: monitoring
## Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# interval: 10s
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
selector:
prometheus: kube-prometheus
## Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# scrapeTimeout: 10s
## ServiceMonitor selector labels
## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
##
# selector:
# prometheus: my-prometheus
##
## Zookeeper chart configuration

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: kibana
version: 2.0.2
appVersion: 7.4.1
version: 2.0.5
appVersion: 7.4.2
description: Kibana is an open source, browser based analytics and search dashboard for Elasticsearch.
keywords:
- kibana

View File

@@ -14,7 +14,7 @@ global: {}
image:
registry: docker.io
repository: bitnami/kibana
tag: 7.4.1-debian-9-r4
tag: 7.4.2-debian-9-r2
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images

View File

@@ -14,7 +14,7 @@ global: {}
image:
registry: docker.io
repository: bitnami/kibana
tag: 7.4.1-debian-9-r4
tag: 7.4.2-debian-9-r2
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images

View File

@@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 7.4.0
appVersion: 7.4.2
description: Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite "stash".
engine: gotpl
home: https://www.elastic.co/products/logstash
@@ -14,4 +14,4 @@ maintainers:
name: logstash
sources:
- https://github.com/bitnami/bitnami-docker-logstash
version: 0.2.0
version: 0.2.1

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: magento
version: 9.0.1
version: 9.0.2
appVersion: 2.3.3
description: A feature-rich flexible e-commerce solution. It includes transaction options, multi-store functionality, loyalty programs, product categorization and shopper filtering, promotion rules, and more.
keywords:

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/magento
tag: 2.3.3-debian-9-r13
tag: 2.3.3-debian-9-r14
## Set to true if you would like to see extra information on logs
## It turns BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
@@ -178,7 +178,7 @@ elasticsearch:
image:
registry: docker.io
repository: bitnami/elasticsearch
tag: 6.8.4-debian-9-r10
tag: 6.8.4-debian-9-r13
## Enable to perform the sysctl operation
sysctlImage:
enabled: true
@@ -338,7 +338,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-9-r101
tag: 0.7.0-debian-9-r104
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/magento
tag: 2.3.3-debian-9-r13
tag: 2.3.3-debian-9-r14
## Set to true if you would like to see extra information on logs
## It turns BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
@@ -178,7 +178,7 @@ elasticsearch:
image:
registry: docker.io
repository: bitnami/elasticsearch
tag: 6.8.4-debian-9-r10
tag: 6.8.4-debian-9-r13
## Enable to perform the sysctl operation
sysctlImage:
enabled: true
@@ -338,7 +338,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-9-r101
tag: 0.7.0-debian-9-r104
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: mariadb-galera
version: 0.5.0
appVersion: 10.3.18
version: 0.5.1
appVersion: 10.3.20
description: MariaDB Galera is a multi-master database cluster solution for synchronous replication and high availability.
keywords:
- mariadb

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/mariadb-galera
tag: 10.3.18-debian-9-r32
tag: 10.3.20-debian-9-r0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -416,7 +416,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.12.1-debian-9-r74
tag: 0.12.1-debian-9-r96
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/mariadb-galera
tag: 10.3.18-debian-9-r32
tag: 10.3.20-debian-9-r0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -416,7 +416,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.12.1-debian-9-r74
tag: 0.12.1-debian-9-r96
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: memcached
version: 3.0.9
appVersion: 1.5.19
version: 3.0.16
appVersion: 1.5.20
description: Chart for Memcached
keywords:
- memcached

View File

@@ -13,7 +13,7 @@
image:
registry: docker.io
repository: bitnami/memcached
tag: 1.5.19-debian-9-r29
tag: 1.5.20-debian-9-r2
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -90,7 +90,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/memcached-exporter
tag: 0.6.0-debian-9-r67
tag: 0.6.0-debian-9-r72
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -13,7 +13,7 @@
image:
registry: docker.io
repository: bitnami/memcached
tag: 1.5.19-debian-9-r29
tag: 1.5.20-debian-9-r2
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -90,7 +90,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/memcached-exporter
tag: 0.6.0-debian-9-r67
tag: 0.6.0-debian-9-r72
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: mongodb-sharded
version: 0.1.0
version: 0.1.1
appVersion: 4.0.13
description: NoSQL document-oriented database that stores JSON-like documents with dynamic schemas, simplifying the integration of data in content-driven applications. Sharded topology.
keywords:

View File

@@ -50,7 +50,7 @@ spec:
command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.mountPath }}"]
securityContext:
runAsUser: 0
resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }}
resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }}
volumeMounts:
- name: datadir
mountPath: {{ .Values.persistence.mountPath }}

View File

@@ -52,7 +52,7 @@ spec:
command: ["chown", "-R", "{{ $.Values.securityContext.runAsUser }}:{{ $.Values.securityContext.fsGroup }}", "{{ $.Values.persistence.mountPath }}"]
securityContext:
runAsUser: 0
resources: {{ toYaml $.Values.volumePermissions.resources | nindent 10 }}
resources: {{ toYaml $.Values.volumePermissions.resources | nindent 12 }}
volumeMounts:
- name: datadir
mountPath: {{ $.Values.persistence.mountPath }}

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: mxnet
version: 1.3.12
version: 1.3.18
appVersion: 1.5.1
description: A flexible and efficient library for deep learning
keywords:

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/mxnet
tag: 1.5.1-debian-9-r19
tag: 1.5.1-debian-9-r25
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -64,7 +64,7 @@ volumePermissions:
git:
registry: docker.io
repository: bitnami/git
tag: 2.24.0-debian-9-r1
tag: 2.24.0-debian-9-r8
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/mxnet
tag: 1.5.1-debian-9-r19
tag: 1.5.1-debian-9-r25
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -64,7 +64,7 @@ volumePermissions:
git:
registry: docker.io
repository: bitnami/git
tag: 2.24.0-debian-9-r1
tag: 2.24.0-debian-9-r8
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: mysql
version: 6.5.0
version: 6.5.1
appVersion: 8.0.18
description: Chart to create a Highly available MySQL cluster
keywords:

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/mysql
tag: 8.0.18-debian-9-r0
tag: 8.0.18-debian-9-r21
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -305,7 +305,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.12.1-debian-9-r76
tag: 0.12.1-debian-9-r100
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -14,7 +14,7 @@
image:
registry: docker.io
repository: bitnami/mysql
tag: 8.0.18-debian-9-r0
tag: 8.0.18-debian-9-r21
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -305,7 +305,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.12.1-debian-9-r76
tag: 0.12.1-debian-9-r100
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

View File

@@ -1,6 +1,6 @@
apiVersion: v1
name: nginx-ingress-controller
version: 5.0.10
version: 5.1.0
appVersion: 0.26.1
description: Chart for the nginx Ingress controller
keywords:

View File

@@ -43,6 +43,9 @@ Create a default fully qualified default backend name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "nginx-ingress.defaultBackend.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- printf "%s-%s" .Values.fullnameOverride .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}}
@@ -50,6 +53,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- printf "%s-%s-%s" .Release.Name $name .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use

View File

@@ -13,7 +13,7 @@ name: controller
image:
registry: docker.io
repository: bitnami/nginx-ingress-controller
tag: 0.26.1-debian-9-r0
tag: 0.26.1-debian-9-r23
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -398,7 +398,7 @@ defaultBackend:
image:
registry: docker.io
repository: bitnami/nginx
tag: 1.16.1-debian-9-r61
tag: 1.16.1-debian-9-r86
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.

Some files were not shown because too many files have changed in this diff Show More