Merge branch 'master' into MemcachedProd

This commit is contained in:
Javier J. Salmerón-García
2018-11-21 13:29:13 +01:00
committed by GitHub
431 changed files with 8446 additions and 2620 deletions

View File

@@ -29,11 +29,11 @@ $ helm search bitnami
- [Parse](https://github.com/helm/charts/tree/master/stable/parse)
- [Phabricator](https://github.com/helm/charts/tree/master/stable/phabricator)
- [phpBB](https://github.com/helm/charts/tree/master/stable/phpbb)
- [PostgreSQL](https://github.com/helm/charts/tree/master/stable/postgresql)
- [PrestaShop](https://github.com/helm/charts/tree/master/stable/prestashop)
- [RabbitMQ](https://github.com/helm/charts/tree/master/stable/rabbitmq)
- [Redis](https://github.com/helm/charts/tree/master/stable/redis)
- [Redmine](https://github.com/helm/charts/tree/master/stable/redmine)
- [SugarCRM](https://github.com/helm/charts/tree/master/stable/sugarcrm)
- [SuiteCRM](https://github.com/helm/charts/tree/master/stable/suitecrm)
- [TestLink](https://github.com/helm/charts/tree/master/stable/testlink)
- [WordPress](https://github.com/helm/charts/tree/master/stable/wordpress)
@@ -41,7 +41,7 @@ $ helm search bitnami
## Bitnami charts
- [Apache](https://github.com/bitnami/charts/tree/master/bitnami/apache)
- [Consul](https://github.com/bitnami/charts/tree/master/bitnami/consul)
- [HashiCorp Consul](https://github.com/bitnami/charts/tree/master/bitnami/consul)
- [Elasticsearch](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch)
- [etcd](https://github.com/bitnami/charts/tree/master/bitnami/etcd)
- [Jenkins](https://github.com/bitnami/charts/tree/master/bitnami/jenkins)
@@ -51,7 +51,6 @@ $ helm search bitnami
- [MySQL](https://github.com/bitnami/charts/tree/master/bitnami/mysql)
- [nginx](https://github.com/bitnami/charts/tree/master/bitnami/nginx)
- [NodeJS](https://github.com/bitnami/charts/tree/master/bitnami/node)
- [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql)
- [TensorFlow Inception](https://github.com/bitnami/charts/tree/master/bitnami/tensorflow-inception)
- [Tomcat](https://github.com/bitnami/charts/tree/master/bitnami/tomcat)
- [WildFly](https://github.com/bitnami/charts/tree/master/bitnami/wildfly)

View File

@@ -1,6 +1,6 @@
name: apache
version: 2.0.2
appVersion: 2.4.35
version: 2.1.2
appVersion: 2.4.37
description: Chart for Apache HTTP Server
keywords:
- apache

View File

@@ -46,6 +46,7 @@ The following tables lists the configurable parameters of the Apache chart and t
| Parameter | Description | Default |
| --------------------------------- | ------------------------------------- | --------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | Apache image registry | `docker.io` |
| `image.repository` | Apache Image name | `bitnami/apache` |
| `image.tag` | Apache Image tag | `{VERSION}` |

View File

@@ -14,3 +14,26 @@ We truncate at 24 chars because some Kubernetes name fields are limited to this
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 24 -}}
{{- end -}}
{{/*
Return the proper Apache image name
*/}}
{{- define "apache.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}

View File

@@ -29,7 +29,7 @@ spec:
{{- end }}
containers:
- name: {{ template "fullname" . }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: "{{ template "apache.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
ports:
- name: http

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Apache image version
## ref: https://hub.docker.com/r/bitnami/apache/tags/
##
image:
registry: docker.io
repository: bitnami/apache
tag: 2.4.35-debian-9
tag: 2.4.37
## Specify a imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
@@ -14,4 +20,4 @@ image:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
# - myRegistryKeySecretName

View File

@@ -0,0 +1,17 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
OWNERS

View File

@@ -0,0 +1,12 @@
name: cassandra
version: 0.1.0
appVersion: 3.11.3
description: Apache Cassandra is a free and open-source distributed database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. Cassandra offers robust support for clusters spanning multiple datacenters, with asynchronous masterless replication allowing low latency operations for all clients.
icon: https://d33np9n32j53g7.cloudfront.net/assets/stacks/cassandra/img/cassandra-stack-220x234-071ca9e210d165c3972d41ff9f96bd60.png
sources:
- https://github.com/bitnami/bitnami-docker-cassandra
home: http://cassandra.apache.org
maintainers:
- name: Bitnami
email: containers@bitnami.com
engine: gotpl

137
bitnami/cassandra/README.md Normal file
View File

@@ -0,0 +1,137 @@
# cassandra
[cassandra](https://cassandra.apache.org) Apache Cassandra is a free and open-source distributed database management system designed to handle large amounts of data across many commodity servers or datacenters.
## TL;DR;
```console
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install bitnami/cassandra
```
## Introduction
This chart bootstraps a [Cassandra](https://github.com/bitnami/bitnami-docker-cassandra) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
## Prerequisites
- Kubernetes 1.8+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install --name my-release bitnami/cassandra
```
The command deploys one node with Cassandra on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` release:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following tables lists the configurable parameters of the cassandra chart and their default values.
| Parameter | Description | Default |
|--------------------------------------------|----------------------------------------------------------------------------------------------------------------|------------------------------------------------------|
| `global.imageRegistry` | Global Docker Image registry | `nil` |
| `image.registry` | Cassandra Image registry | `docker.io` |
| `image.repository` | Cassandra Image name | `bitnami/cassandra` |
| `image.tag` | Cassandra Image tag | `{VERSION}` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `image.pullSecrets` | Specify `docker-registry` secret names as an array | `nil` |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.nodePort` | Kubernetes Service nodePort | `nil` |
| `service.loadBalancerIP` | LoadBalancerIP if service type is `LoadBalancer` | `nil` |
| `service.annotations` | Annotations for the service | {} |
| `persistence.enabled` | Use PVCs to persist data | `true` |
| `persistence.storageClass` | Persistent Volume Storage Class | `generic` |
| `persistence.annotations`                  | Persistent Volume Claim annotations                                                                              | {}                                                     |
| `persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
| `persistence.size` | Persistent Volume Size | `8Gi` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `cluster.name` | Cassandra cluster name | `cassandra` |
| `cluster.replicaCount` | Number of Cassandra nodes | `1` |
| `cluster.seedCount`                        | Number of seed nodes (note: must be greater than or equal to 1 and less than or equal to `cluster.replicaCount`) | `1`                                                    |
| `cluster.numTokens` | Number of tokens for each node | `256` |
| `cluster.datacenter` | Datacenter name | `dc1` |
| `cluster.rack` | Rack name | `rack1` |
| `cluster.enableRPC` | Enable Thrift RPC endpoint | `true` |
| `cluster.jvm.extraOpts`                    | Set the value for Java Virtual Machine extra options (JVM_EXTRA_OPTS)                                            | `nil`                                                  |
| `cluster.jvm.maxHeapSize` | Set Java Virtual Machine maximum heap size (MAX_HEAP_SIZE). Calculated automatically if `nil` | `nil` |
| `cluster.jvm.newHeapSize` | Set Java Virtual Machine new heap size (HEAP_NEWSIZE). Calculated automatically if `nil` | `nil` |
| `dbUser.user` | Cassandra admin user | `cassandra` |
| `dbUser.forcePassword` | Force the user to provide a non-empty password for `dbUser.user` | `false` |
| `dbUser.password` | Password for `dbUser.user`. Randomly generated if empty | (Random generated) |
| `dbUser.existingSecret` | Use an existing secret object for `dbUser.user` password (will ignore `dbUser.password`) | `nil` |
| `startupCQL` | Startup CQL commands (done in the first node). Useful for creating keyspaces at startup, for instance | `nil` |
| `livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | `1` |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `readinessProbe.initialDelaySeconds`| Delay before readiness probe is initiated | `5` |
| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | `1` |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `podAnnotations` | Additional pod annotations | `{}` |
| `podLabels` | Additional pod labels | `{}` |
| `statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete |
| `statefulset.rollingUpdatePartition` | Partition update strategy | `nil` |
| `securityContext.enabled` | Enable security context | `true` |
| `securityContext.fsGroup` | Group ID for the container | `1001` |
| `securityContext.runAsUser` | User ID for the container | `1001` |
| `affinity` | Enable node/pod affinity | {} |
| `tolerations` | Toleration labels for pod assignment | [] |
| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image.registry` | Cassandra exporter Image registry | `docker.io` |
| `metrics.image.repository`                | Cassandra exporter Image name                                                              | `criteord/cassandra_exporter`                                |
| `metrics.image.tag` | Cassandra exporter Image tag | `2.0.4` |
| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify `docker-registry` secret names as an array | `nil` |
| `metrics.podAnnotations` | Additional annotations for Metrics exporter | `{prometheus.io/scrape: "true", prometheus.io/port: "8080"}` |
| `metrics.resources` | Exporter resource requests/limit | `{}` |
The above parameters map to the env variables defined in [bitnami/cassandra](http://github.com/bitnami/bitnami-docker-cassandra). For more information please refer to the [bitnami/cassandra](http://github.com/bitnami/bitnami-docker-cassandra) image documentation.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install --name my-release \
--set dbUser.user=admin,dbUser.password=password\
bitnami/cassandra
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install --name my-release -f values.yaml bitnami/cassandra
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Persistence
The [Bitnami cassandra](https://github.com/bitnami/bitnami-docker-cassandra) image stores the cassandra data at the `/bitnami/cassandra` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
See the [Configuration](#configuration) section to configure the PVC or to disable persistence.

View File

@@ -0,0 +1,59 @@
** Please be patient while the chart is being deployed **
Cassandra can be accessed through the following URLs from within the cluster:
- CQL: {{ template "cassandra.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.cqlPort }}
- Thrift: {{ template "cassandra.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.thriftPort }}
To get your password run:
export CASSANDRA_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "cassandra.fullname" . }} -o jsonpath="{.data.cassandra-password}" | base64 --decode)
Check the cluster status by running:
kubectl exec -it --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{.items[0].metadata.name}') nodetool status
To connect to your Cassandra cluster using CQL:
1. Run a Cassandra pod that you can use as a client:
kubectl run --namespace {{ .Release.Namespace }} {{ template "cassandra.fullname" . }}-client --rm --tty -i \
--env CASSANDRA_PASSWORD=$CASSANDRA_PASSWORD \
{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "cassandra.name" . }}-client=true"{{ end }} \
--image {{ template "cassandra.image" . }} -- bash
2. Connect using the cqlsh client:
cqlsh -u {{ .Values.dbUser.user }} -p $CASSANDRA_PASSWORD {{ template "cassandra.fullname" . }}
{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
Note: Since NetworkPolicy is enabled, only pods with label
"{{ template "cassandra.fullname" . }}-client=true"
will be able to connect to Cassandra.
{{- else -}}
To connect to your database from outside the cluster execute the following commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "cassandra.fullname" . }})
cqlsh -u {{ .Values.dbUser.user }} -p $CASSANDRA_PASSWORD $NODE_IP $NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
{{/* NOTE: this chart is cassandra — the service lookups below must use the cassandra.fullname helper, not redis.fullname (copy-paste from the redis chart) */}}
    Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "cassandra.fullname" . }}'
    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "cassandra.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
cqlsh -u {{ .Values.dbUser.user }} -p $CASSANDRA_PASSWORD $SERVICE_IP
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "cassandra.fullname" . }} {{ .Values.service.cqlPort }}:{{ .Values.service.cqlPort }} &
cqlsh -u {{ .Values.dbUser.user }} -p $CASSANDRA_PASSWORD 127.0.0.1 {{ .Values.service.cqlPort }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,77 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "cassandra.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "networkPolicy.apiVersion" -}}
{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "cassandra.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "cassandra.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the proper Cassandra image name
*/}}
{{- define "cassandra.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper metrics image name
*/}}
{{- define "cassandra.metrics.image" -}}
{{- $registryName := .Values.metrics.image.registry -}}
{{- $repositoryName := .Values.metrics.image.repository -}}
{{- $tag := .Values.metrics.image.tag | toString -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}

View File

@@ -0,0 +1,20 @@
{{- if (not .Values.dbUser.existingSecret) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "cassandra.fullname" . }}
labels:
app: {{ template "cassandra.name" . }}
chart: {{ template "cassandra.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
type: Opaque
data:
{{- if .Values.dbUser.password }}
cassandra-password: {{ .Values.dbUser.password | b64enc | quote }}
{{- else if (not .Values.dbUser.forcePassword) }}
cassandra-password: {{ randAlphaNum 10 | b64enc | quote }}
{{- else }}
{{/* forcePassword is set and no password was given: `required` aborts the render with a clear message. Keep b64enc|quote so that, like the other branches, any rendered value is valid Secret data (data values must be base64). */}}
cassandra-password: {{ required "A Cassandra Password is required!" .Values.dbUser.password | b64enc | quote }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "cassandra.fullname" . }}-headless
labels:
app: {{ template "cassandra.name" . }}
chart: {{ template "cassandra.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: intra
port: 7000
targetPort: intra
- name: tls
port: 7001
targetPort: tls
- name: jmx
port: 7199
targetPort: jmx
- name: cql
port: {{ .Values.service.cqlPort }}
targetPort: cql
{{- if .Values.cluster.enableRPC }}
- name: thrift
port: {{ .Values.service.thriftPort }}
targetPort: thrift
{{- end }}
selector:
app: {{ template "cassandra.name" . }}
release: {{ .Release.Name }}

View File

@@ -0,0 +1,35 @@
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: {{ template "networkPolicy.apiVersion" . }}
metadata:
name: {{ template "cassandra.fullname" . }}
labels:
app: {{ template "cassandra.name" . }}
chart: {{ template "cassandra.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
podSelector:
matchLabels:
app: {{ template "cassandra.name" . }}
release: {{ .Release.Name }}
ingress:
# Allow inbound connections
- ports:
- port: {{ .Values.service.cqlPort }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels:
{{ template "cassandra.fullname" . }}-client: "true"
{{- end }}
- podSelector:
matchLabels:
app: {{ template "cassandra.name" . }}
release: {{ .Release.Name }}
{{- if .Values.metrics.enabled }}
# Allow prometheus scrapes for metrics
- ports:
- port: 8080
{{- end }}
{{- end }}

View File

@@ -0,0 +1,29 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "cassandra.fullname" . }}
labels:
app: {{ template "cassandra.name" . }}
chart: {{ template "cassandra.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{ if eq .Values.service.type "LoadBalancer" -}} {{ if .Values.service.loadBalancerIP -}}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end -}} {{- end }}
type: {{ .Values.service.type }}
ports:
- name: cql
port: {{ .Values.service.cqlPort }}
targetPort: cql
{{- if .Values.cluster.enableRPC }}
- name: thrift
port: {{ .Values.service.thriftPort }}
targetPort: thrift
{{- end }}
selector:
app: {{ template "cassandra.name" . }}
release: {{ .Release.Name }}

View File

@@ -0,0 +1,212 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "cassandra.fullname" . }}
labels:
app: {{ template "cassandra.name" . }}
chart: {{ template "cassandra.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
matchLabels:
app: {{ template "cassandra.name" . }}
release: {{ .Release.Name }}
serviceName: {{ template "cassandra.fullname" . }}-headless
replicas: {{ .Values.cluster.replicaCount }}
updateStrategy:
type: {{ .Values.statefulset.updateStrategy }}
{{- if .Values.statefulset.rollingUpdatePartition }}
rollingUpdate:
partition: {{ .Values.statefulset.rollingUpdatePartition }}
{{- end }}
template:
metadata:
labels:
app: {{ template "cassandra.name" . }}
release: {{ .Release.Name }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
{{- end }}
{{- if or .Values.podAnnotations .Values.metrics.enabled }}
annotations:
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
{{- if .Values.metrics.enabled }}
{{ toYaml .Values.metrics.podAnnotations | indent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.selector }}
{{ toYaml .Values.selector | indent 6 }}
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsUser: {{ .Values.securityContext.runAsUser }}
{{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: cassandra
command:
- bash
- -ec
# Node 0 is the password seeder
- |
if [[ $HOSTNAME =~ (.*)-0$ ]]; then
echo "Setting node as password seeder"
export CASSANDRA_PASSWORD_SEEDER=yes
fi
/app-entrypoint.sh /run.sh
image: {{ template "cassandra.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
resources:
{{ toYaml .Values.resources | indent 10 }}
env:
- name: CASSANDRA_CLUSTER_NAME
value: {{ .Values.cluster.name }}
- name: CASSANDRA_SEEDS
{{- $global := . }}
{{- $replicas := .Values.cluster.seedCount | int }}
value: "{{- range $i, $e := until $replicas }}{{ template "cassandra.fullname" $global }}-{{ $i }}.{{ template "cassandra.fullname" $global }}-headless.{{ $global.Release.Namespace }}.svc.cluster.local{{- if (lt ( add1 $i ) $replicas ) }},{{- end }}{{- end }}"
- name: CASSANDRA_PASSWORD
valueFrom:
secretKeyRef:
name: {{ if .Values.dbUser.existingSecret }}{{ .Values.dbUser.existingSecret }}{{- else }}{{ template "cassandra.fullname" . }}{{- end }}
key: cassandra-password
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CASSANDRA_NUM_TOKENS
value: {{ .Values.cluster.numTokens | quote }}
- name: CASSANDRA_DATACENTER
value: {{ .Values.cluster.datacenter }}
- name: CASSANDRA_ENDPOINT_SNITCH
value: {{ .Values.cluster.endpointSnitch }}
- name: CASSANDRA_RACK
value: {{ .Values.cluster.rack }}
{{/* JVM settings live under cluster.jvm in values.yaml (and the README documents
     cluster.jvm.*); reading .Values.jvm.* here meant these env vars were never set. */}}
{{- if .Values.cluster.jvm.maxHeapSize }}
- name: MAX_HEAP_SIZE
value: {{ .Values.cluster.jvm.maxHeapSize | quote }}
{{- end }}
{{- if .Values.cluster.jvm.newHeapSize }}
- name: HEAP_NEWSIZE
value: {{ .Values.cluster.jvm.newHeapSize | quote }}
{{- end }}
{{- if .Values.cluster.jvm.extraOpts }}
- name: JVM_EXTRA_OPTS
value: {{ .Values.cluster.jvm.extraOpts | quote }}
{{- end }}
{{- if .Values.startupCQL }}
- name: CASSANDRA_STARTUP_CQL
value: {{ .Values.startupCQL | quote }}
{{- end }}
- name: CASSANDRA_ENABLE_RPC
value: {{ .Values.cluster.enableRPC | quote }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command: [ "/bin/sh", "-c", "nodetool status" ]
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
exec:
command: [ "/bin/sh", "-c", "nodetool status | grep -E \"^UN\\s+${POD_IP}\"" ]
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- end }}
ports:
- name: intra
containerPort: 7000
- name: tls
containerPort: 7001
- name: jmx
containerPort: 7199
- name: cql
containerPort: 9042
{{- if .Values.cluster.enableRPC }}
- name: thrift
containerPort: 9160
{{- end }}
volumeMounts:
- name: data
mountPath: /bitnami/cassandra
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ .Values.image.pullSecrets }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
image: {{ template "cassandra.metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
{{- if .Values.metrics.image.pullSecrets }}
imagePullSecrets:
- name: {{ .Values.metrics.image.pullSecrets }}
{{- end }}
ports:
- name: metrics
containerPort: 8080
protocol: TCP
- name: jmx
containerPort: 5555
livenessProbe:
tcpSocket:
port: metrics
readinessProbe:
httpGet:
path: /metrics
port: metrics
initialDelaySeconds: 20
timeoutSeconds: 45
{{- end }}
{{- if not .Values.persistence.enabled }}
volumes:
- name: data
emptyDir: {}
{{- else }}
volumeClaimTemplates:
- metadata:
name: data
labels:
app: {{ template "cassandra.name" . }}
chart: {{ template "cassandra.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.persistence.annotations }}
annotations:
{{ toYaml .Values.persistence.annotations | indent 8 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .Values.persistence.storageClass | quote }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,205 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Cassandra image version
## ref: https://hub.docker.com/r/bitnami/cassandra/tags/
##
image:
registry: docker.io
repository: bitnami/cassandra
## Bitnami Cassandra image tag
## ref: https://github.com/bitnami/bitnami-docker-cassandra#supported-tags-and-respective-dockerfile-links
##
tag: 3.11.3
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Specify a service type
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: ClusterIP
cqlPort: 9042
thriftPort: 9160
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort: 30001
# loadBalancerIP:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## If true, use a Persistent Volume Claim, If false, use emptyDir
##
enabled: true
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## Persistent Volume Claim annotations
##
annotations:
## Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
## Minimum memory for development is 4GB and 2 CPU cores
## Minimum memory for production is 8GB and 4 CPU cores
## ref: http://docs.datastax.com/en/archived/cassandra/2.0/cassandra/architecture/architecturePlanningHardware_c.html
##
resources: {}
# requests:
# memory: 4Gi
# cpu: 2
# limits:
# memory: 4Gi
# cpu: 2
## Cluster parameters
##
cluster:
name: cassandra
replicaCount: 3
seedCount: 2
numTokens: 256
endpointSnitch: SimpleSnitch
datacenter: dc1
rack: rack1
enableRPC: true
jvm:
## Extra JVM Settings
##
extraOpts:
## Memory settings: These are calculated automatically
## unless specified otherwise
##
# maxHeapSize: 4G
# newHeapSize: 800M
## Database credentials
##
dbUser:
user: cassandra
forcePassword: true
# password:
# existingSecret:
## Startup CQL commands. Useful for creating a keyspace
## and pre-populating data
##
# startupCQL: "CREATE KEYSPACE IF NOT EXISTS example_keyspace WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1};"
## Liveness and Readiness probe values.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
## Additional pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## StatefulSet settings
##
statefulset:
updateStrategy: OnDelete
# rollingUpdatePartition:
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Network policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## Specifies whether a NetworkPolicy should be created
##
enabled: true
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port Redis is listening
## on. When true, Redis will accept connections from any source
## (with the correct destination port).
##
# allowExternal: true
## Cassandra exporter configuration
## Ref: https://github.com/criteo/cassandra_exporter
##
metrics:
enabled: true
image:
registry: docker.io
pullPolicy: IfNotPresent
repository: criteord/cassandra_exporter
tag: 2.0.4
# pullSecrets:
resources: {}
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"

View File

@@ -0,0 +1,205 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Cassandra image version
## ref: https://hub.docker.com/r/bitnami/cassandra/tags/
##
image:
registry: docker.io
repository: bitnami/cassandra
## Bitnami Cassandra image tag
## ref: https://github.com/bitnami/bitnami-docker-cassandra#supported-tags-and-respective-dockerfile-links
##
tag: 3.11.3
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Specify a service type
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: ClusterIP
cqlPort: 9042
thriftPort: 9160
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort: 30001
# loadBalancerIP:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## If true, use a Persistent Volume Claim, If false, use emptyDir
##
enabled: true
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## Persistent Volume Claim annotations
##
annotations: {}
## Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
## Minimum memory for development is 4GB and 2 CPU cores
## Minimum memory for production is 8GB and 4 CPU cores
## ref: http://docs.datastax.com/en/archived/cassandra/2.0/cassandra/architecture/architecturePlanningHardware_c.html
##
resources: {}
# requests:
# memory: 4Gi
# cpu: 2
# limits:
# memory: 4Gi
# cpu: 2
## Cluster parameters
##
cluster:
name: cassandra
replicaCount: 1
seedCount: 1
numTokens: 256
datacenter: dc1
rack: rack1
enableRPC: true
endpointSnitch: SimpleSnitch
jvm:
## Extra JVM Settings
##
extraOpts:
## Memory settings: These are calculated automatically
## unless specified otherwise
##
# maxHeapSize: 4G
# newHeapSize: 800M
## Database credentials
##
dbUser:
user: cassandra
forcePassword: false
# password:
# existingSecret:
## Startup CQL commands. Useful for creating a keyspace
## and pre-populating data
##
# startupCQL: "CREATE KEYSPACE IF NOT EXISTS example_keyspace WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1};"
## Liveness and Readiness probe values.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
## Additional pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## StatefulSet settings
##
statefulset:
updateStrategy: OnDelete
# rollingUpdatePartition:
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Network policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## Specifies whether a NetworkPolicy should be created
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port Cassandra is listening
## on. When true, Cassandra will accept connections from any source
## (with the correct destination port).
##
# allowExternal: true
## Cassandra exporter configuration
## Ref: https://github.com/criteo/cassandra_exporter
##
metrics:
enabled: false
image:
registry: docker.io
pullPolicy: IfNotPresent
repository: criteord/cassandra_exporter
tag: 2.0.4
# pullSecrets:
resources: {}
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"

View File

@@ -1,12 +1,12 @@
name: consul
home: https://www.consul.io/
sources:
- https://github.com/bitnami/consul
version: 2.1.1
appVersion: 1.3.0
version: 2.4.3
appVersion: 1.4.0
description: Highly available and distributed service discovery and key-value store
designed with support for the modern data center to make distributed systems and
configuration easy.
home: https://www.consul.io/
sources:
- https://github.com/bitnami/consul
icon: https://raw.githubusercontent.com/hashicorp/consul/bce3809dfca37b883828c3715b84143dd71c0f85/website/source/assets/images/favicons/android-chrome-512x512.png
maintainers:
- name: Bitnami

View File

@@ -1,6 +1,6 @@
# Consul Helm Chart
# HashiCorp Consul Helm Chart
[Consul](https://www.consul.io/) has multiple components, but as a whole, it is a tool for discovering and configuring services in your infrastructure
[HashiCorp Consul](https://www.consul.io/) has multiple components, but as a whole, it is a tool for discovering and configuring services in your infrastructure
## TL;DR
@@ -11,7 +11,7 @@ $ helm install bitnami/consul
## Introduction
This chart bootstraps a [Consul](https://github.com/bitnami/bitnami-docker-consul) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
This chart bootstraps a [HashiCorp Consul](https://github.com/bitnami/bitnami-docker-consul) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
@@ -28,7 +28,7 @@ To install the chart with the release name `my-release`:
$ helm install --name my-release bitnami/consul
```
The command deploys Consul on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
The command deploys HashiCorp Consul on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
@@ -47,62 +47,66 @@ $ helm delete --purge my-release
## Configuration
The following tables lists the configurable parameters of the Consul chart and their default values.
The following table lists the configurable parameters of the HashiCorp Consul chart and their default values.
| Parameter | Description | Default |
| ------------------------------------ | ------------------------------------------------------ | ---------------------------------------------------------- |
| `image.registry` | Consul image registry | `docker.io` |
| `image.repository` | Consul image name | `bitnami/consul` |
| `image.tag` | Consul image tag | `{VERSION}` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `image.pullSecrets` | Specify image pull secrets | `nil` |
| `replicas` | Number of replicas | `3` |
| `httpPort` | Consul http listening port | `8500` |
| `rpcPort` | Consul rpc listening port | `8400` |
| `serflanPort` | Container serf lan listening port | `8301` |
| `serverPort` | Container server listening port | `8300` |
| `consulDnsPort` | Container dns listening port | `8600` |
| `uiPort` | Consul UI port | `80` |
| `datacenterName` | Consul datacenter name | `dc1` |
| `gossipKey` | Gossip key for all members | `nil` |
| `domain` | Consul domain | `consul` |
| `clientAddress` | Address in which Consul will bind client interfaces | `0.0.0.0` |
| `serflanAddress` | Address used for Serf LAN communications | `0.0.0.0` |
| `raftMultiplier` | Multiplier used to scale key Raft timing parameters | `10Gi` |
| `persistence.enabled` | Use a PVC to persist data | `true` |
| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` |
| `persistence.size` | Size of data volume | `8Gi` |
| `persistence.annotations` | Annotations for the persistent volume | `nil` |
| `resources` | Container resource requests and limits | `{}` |
| `maxUnavailable` | Pod disruption Budget maxUnavailable | `1` |
| `nodeAffinity` | Consul pod node-affinity setting | `nil` |
| `antiAffinity` | Consul pod anti-affinity setting | `soft` |
| `ui.service.enabled` | Use a service to access Consul Ui | `true` |
| `ui.service.type` | Kubernetes Service Type | `ClusterIP` |
| `ui.ingress.enabled` | Enable ingress controller resource | `false` |
| `ui.ingress.hosts[0].name` | Hostname to your Consul installation | `consul-ui.local` |
| `ui.ingress.hosts[0].path` | Path within the url structure | `/` |
| `ui.ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` |
| `ui.ingress.hosts[0].certManager` | Add annotations for cert-manager | `false` |
| `ui.ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `consul-ui.local-tls` |
| `ui.ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` |
| `ui.ingress.secrets[0].name` | TLS Secret Name | `nil` |
| `ui.ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` |
| `ui.ingress.secrets[0].key` | TLS Secret Key | `nil` |
| `configmap` | Consul configuration to be injected as ConfigMap | `nil` |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image` | Exporter image | `prom/consul-exporter` |
| `metrics.imageTag` | Exporter image tag | `v0.3.0` |
| `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` |
| `metrics.resources` | Exporter resource requests/limit | `{}` |
| `metrics.annotations` | Exporter annotations | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| Parameter | Description | Default |
| ------------------------------------ | ---------------------------------------------------------------- | ---------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | HashiCorp Consul image registry | `docker.io` |
| `image.repository` | HashiCorp Consul image name | `bitnami/consul` |
| `image.tag` | HashiCorp Consul image tag | `{VERSION}` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `image.pullSecrets` | Specify image pull secrets | `nil` |
| `replicas` | Number of replicas | `3` |
| `httpPort` | HashiCorp Consul http listening port | `8500` |
| `rpcPort` | HashiCorp Consul rpc listening port | `8400` |
| `serflanPort` | Container serf lan listening port | `8301` |
| `serverPort` | Container server listening port | `8300` |
| `consulDnsPort` | Container dns listening port | `8600` |
| `uiPort` | HashiCorp Consul UI port | `80` |
| `datacenterName` | HashiCorp Consul datacenter name | `dc1` |
| `gossipKey` | Gossip key for all members | `nil` |
| `domain` | HashiCorp Consul domain | `consul` |
| `clientAddress` | Address in which HashiCorp Consul will bind client interfaces | `0.0.0.0` |
| `serflanAddress` | Address used for Serf LAN communications | `0.0.0.0` |
| `raftMultiplier`                     | Multiplier used to scale key Raft timing parameters              | `1`                                                         |
| `persistence.enabled` | Use a PVC to persist data | `true` |
| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` |
| `persistence.size` | Size of data volume | `8Gi` |
| `persistence.annotations` | Annotations for the persistent volume | `nil` |
| `resources` | Container resource requests and limits | `{}` |
| `maxUnavailable` | Pod disruption Budget maxUnavailable | `1` |
| `nodeAffinity` | HashiCorp Consul pod node-affinity setting | `nil` |
| `antiAffinity` | HashiCorp Consul pod anti-affinity setting | `soft` |
| `ui.service.enabled`                 | Use a service to access HashiCorp Consul UI                      | `true`                                                      |
| `ui.service.type` | Kubernetes Service Type | `ClusterIP` |
| `ui.service.annotations` | Annotations for HashiCorp Consul UI service | {} |
| `ui.service.loadBalancerIP` | IP if HashiCorp Consul UI service type is `LoadBalancer` | `nil` |
| `ui.ingress.enabled` | Enable ingress controller resource | `false` |
| `ui.ingress.hosts[0].name` | Hostname to your HashiCorp Consul installation | `consul-ui.local` |
| `ui.ingress.hosts[0].path` | Path within the url structure | `/` |
| `ui.ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` |
| `ui.ingress.hosts[0].certManager` | Add annotations for cert-manager | `false` |
| `ui.ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `consul-ui.local-tls` |
| `ui.ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` |
| `ui.ingress.secrets[0].name` | TLS Secret Name | `nil` |
| `ui.ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` |
| `ui.ingress.secrets[0].key` | TLS Secret Key | `nil` |
| `configmap` | HashiCorp Consul configuration to be injected as ConfigMap | `nil` |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image` | Exporter image | `prom/consul-exporter` |
| `metrics.imageTag` | Exporter image tag | `v0.3.0` |
| `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` |
| `metrics.resources` | Exporter resource requests/limit | `{}` |
| `metrics.podAnnotations` | Exporter annotations | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
| `podAnnotations` | Pod annotations | `{}` |
| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
@@ -114,7 +118,7 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm
```console
$ helm install --name my-release --set domain=consul-domain,gossipKey=secretkey bitnami/consul
```
The above command sets the Consul domain to `consul-domain` and sets the gossip key to `secretkey`.
The above command sets the HashiCorp Consul domain to `consul-domain` and sets the gossip key to `secretkey`.
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
@@ -126,7 +130,7 @@ $ helm install --name my-release -f values.yaml bitnami/consul
## Persistence
The [Bitnami Consul](https://github.com/bitnami/bitnami-docker-consul) image stores the Consul data at the `/bitnami` path of the container.
The [Bitnami HashiCorp Consul](https://github.com/bitnami/bitnami-docker-consul) image stores the HashiCorp Consul data at the `/bitnami` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
See the [Configuration](#configuration) section to configure the PVC or to disable persistence.
@@ -136,20 +140,20 @@ See the [Configuration](#configuration) section to configure the PVC or to disab
This chart provides support for ingress resources. If you have an
ingress controller installed on your cluster, such as [nginx-ingress](https://kubeapps.com/charts/stable/nginx-ingress)
or [traefik](https://kubeapps.com/charts/stable/traefik) you can utilize
the ingress controller to service your Consul UI application.
the ingress controller to service your HashiCorp Consul UI application.
To enable ingress integration, please set `ingress.enabled` to `true`
### Hosts
Most likely you will only want to have one hostname that maps to this
Consul installation, however it is possible to have more than one
HashiCorp Consul installation, however it is possible to have more than one
host. To facilitate this, the `ingress.hosts` object is an array.
For each item, please indicate a `name`, `tls`, `tlsSecret`, and any
`annotations` that you may want the ingress controller to know about.
Indicating TLS will cause Consul to generate HTTPS urls, and
Consul will be connected to at port 443. The actual secret that
Indicating TLS will cause HashiCorp Consul to generate HTTPS urls, and
HashiCorp Consul will be connected to at port 443. The actual secret that
`tlsSecret` references does not have to be generated by this chart.
However, please note that if TLS is enabled, the ingress record will not
work until this secret exists.
@@ -218,7 +222,7 @@ kubectl create secret generic consul-tls-encryption \
> Take into account that you will need to create a config map with the proper configuration.
If the secret is specified, the chart will locate those files at `/opt/bitnami/consul/certs/`, so you will want to use the below snippet to configure Consul TLS encryption in your config map:
If the secret is specified, the chart will locate those files at `/opt/bitnami/consul/certs/`, so you will want to use the below snippet to configure HashiCorp Consul TLS encryption in your config map:
```
"ca_file": "/opt/bitnami/consul/certs/ca.pem",

View File

@@ -26,7 +26,21 @@ Create chart name and version as used by the chart label.
Return the proper Consul image name
*/}}
{{- define "consul.image" -}}
{{- $registryName := default "docker.io" .Values.image.registry -}}
{{- $tag := default "latest" .Values.image.tag | toString -}}
{{- printf "%s/%s:%s" $registryName .Values.image.repository $tag -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}

View File

@@ -24,6 +24,15 @@ spec:
chart: {{ template "consul.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- if or .Values.podAnnotations .Values.metrics.enabled }}
annotations:
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
{{- if .Values.metrics.podAnnotations }}
{{ toYaml .Values.metrics.podAnnotations | indent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.image.pullSecrets }}
imagePullSecrets:

View File

@@ -9,12 +9,19 @@ metadata:
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
component: ui
{{- with .Values.ui.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
ports:
- name: http
port: {{ .Values.uiPort }}
targetPort: http
type: "{{ .Values.ui.service.type }}"
{{- if and (eq .Values.ui.service.type "LoadBalancer") .Values.ui.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.ui.service.loadBalancerIP }}
{{- end }}
selector:
app: "{{ template "consul.name" . }}"
chart: {{ template "consul.chart" . }}

View File

@@ -0,0 +1,225 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami HashiCorp Consul image version
## ref: https://hub.docker.com/r/bitnami/consul/tags/
##
image:
registry: docker.io
repository: bitnami/consul
tag: 1.4.0
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Consul replicas
replicas: 3
## Consul service ports
httpPort: 8500
rpcPort: 8400
serflanPort: 8301
serverPort: 8300
consulDnsPort: 8600
uiPort: 80
## Datacenter name for consul. If not supplied, will use the consul
## default 'dc1'
datacenterName: dc1
## Predefined value for gossip key.
## The key must be 16-bytes, can be generated with $(consul keygen)
# gossipKey: 887Syd/BOvbtvRAKviazMg==
## Use TLS to verify the authenticity of servers and clients.
## Check README for more information.
# tlsEncryptionSecretName: your-already-created-secret
## Extra configuration that will be added to the default one.
#localConfig: |-
# {
# "key": "value"
# }
## Consul domain name.
domain: consul
## Consul raft multiplier.
raftMultiplier: '1'
## Consul data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
persistence:
enabled: true
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
resources: {}
# requests:
# memory: 256Mi
# cpu: 100m
## Setting maxUnavailable will create a pod disruption budget that will prevent
## voluntary cluster administration from taking down too many consul pods. If
## you set maxUnavailable, you should set it to ceil((n/2) - 1), where
## n = Replicas. For example, if you have 5 or 6 Replicas, you'll want to set
## maxUnavailable = 2. If you are using the default of 3 Replicas, you'll want
## to set maxUnavailable to 1.
maxUnavailable: 1
## nodeAffinity settings
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: cloud.google.com/gke-preemptible
# operator: NotIn
# values:
# - true
## Anti-Affinity setting. The default "hard" will use pod anti-affinity that is
## requiredDuringSchedulingIgnoredDuringExecution to ensure 2 services don't
## end up on the same node. Setting this to "soft" will use
## preferredDuringSchedulingIgnoredDuringExecution. If set to anything else,
## no anti-affinity rules will be configured.
antiAffinity: "soft"
## Create dedicated UI service
##
ui:
service:
enabled: true
type: "ClusterIP"
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# cloud.google.com/load-balancer-type: "Internal"
# service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
# service.beta.kubernetes.io/azure-load-balancer-internal: "true"
loadBalancerIP:
## Configure the ingress resource that allows you to access the
## Consul user interface. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: consul-ui.local
## Set this to true in order to enable TLS on the ingress record
## A side effect of this will be that the backend consul service will be connected at port 443
tls: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: consul-ui.local-tls
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: consul-ui.local-tls
# key:
# certificate:
## Consul configmap
#configmap: |
# {
# "datacenter":"dc2",
# "domain":"consul",
# "data_dir":"/opt/bitnami/consul/data",
# "pid_file":"/opt/bitnami/consul/tmp/consul.pid",
# "server":true,
# "ui":false,
# "bootstrap_expect":3,
# "addresses": {
# "http":"0.0.0.0"
# },
# "ports": {
# "http":8500,
# "dns":8600,
# "serf_lan":8301,
# "server":8300
# },
# "serf_lan":"0.0.0.0"
# }
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
metrics:
enabled: true
image:
registry: docker.io
repository: prom/consul-exporter
tag: v0.3.0
pullPolicy: IfNotPresent
resources: {}
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9107"
## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1

View File

@@ -1,10 +1,16 @@
## Bitnami Consul image version
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami HashiCorp Consul image version
## ref: https://hub.docker.com/r/bitnami/consul/tags/
##
image:
registry: docker.io
repository: bitnami/consul
tag: 1.3.0-debian-9
tag: 1.4.0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -103,6 +109,15 @@ ui:
service:
enabled: true
type: "ClusterIP"
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# cloud.google.com/load-balancer-type: "Internal"
# service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
# service.beta.kubernetes.io/azure-load-balancer-internal: "true"
loadBalancerIP:
## Configure the ingress resource that allows you to access the
## Consul user interface. Set up the URL
@@ -171,6 +186,11 @@ ui:
# "serf_lan":"0.0.0.0"
# }
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
metrics:
enabled: false
image:
@@ -179,7 +199,9 @@ metrics:
tag: v0.3.0
pullPolicy: IfNotPresent
resources: {}
annotations: {}
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9107"
## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector

View File

@@ -1,6 +1,6 @@
name: elasticsearch
version: 4.0.3
appVersion: 6.4.2
version: 4.2.2
appVersion: 6.5.1
description: A highly scalable open-source full-text search and analytics engine
keywords:
- elasticsearch

View File

@@ -49,107 +49,122 @@ $ helm delete --purge my-release
The following table lists the configurable parameters of the Elasticsearch chart and their default values.
| Parameter | Description | Default |
|---------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|
| `image.registry` | Elasticsearch image registry | `docker.io` |
| `image.repository` | Elasticsearch image repository | `bitnami/elasticsearch` |
| `image.tag` | Elasticsearch image tag | `{VERSION}` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `image.pullSecrets` | Specify image pull secrets | `nil` |
| `name` | Elasticsearch cluster name | `elastic` |
| `config` | Elasticsearch node custom configuration | `` |
| `master.name` | Master-eligible node pod name | `master` |
| `master.replicas` | Desired number of Elasticsearch master-eligible nodes | `2` |
| `master.heapSize` | Master-eligible node heap size | `128m` |
| `master.antiAffinity` | Master-eligible node pod anti-affinity policy | `soft` |
| `master.resources` | CPU/Memory resource requests/limits for master-eligible nodes pods | `requests: { cpu: "25m", memory: "256Mi" }` |
| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `90` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `securityContext.enabled` | Enable security context | `true` |
| `securityContext.fsGroup` | Group ID for the container | `1001` |
| `securityContext.runAsUser` | User ID for the container | `1001`
| `discovery.name` | Discover node pod name | `discovery` |
| `coordinating.name` | Coordinating-only node pod name | `coordinating-only` |
| `coordinating.replicas` | Desired number of Elasticsearch coordinating-only nodes | `2` |
| `coordinating.heapSize` | Coordinating-only node heap size | `128m` |
| `coordinating.antiAffinity` | Coordinating-only node pod anti-affinity policy | `soft` |
| `coordinating.service.type` | Coordinating-only node kubernetes service type | `ClusterIP` |
| `coordinating.service.port` | Elasticsearch REST API port | `9200` |
| `coordinating.resources` | CPU/Memory resource requests/limits for coordinating-only nodes pods | `requests: { cpu: "25m", memory: "256Mi" }` |
| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe (coordinating-only nodes pod) | `true` |
| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` |
| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.name` | Data node pod name | `data` |
| `data.replicas` | Desired number of Elasticsearch data nodes nodes | `3` |
| `data.heapSize` | Data node heap size | `1024m` |
| `data.antiAffinity` | Data pod anti-affinity policy | `soft` |
| `data.resources` | CPU/Memory resource requests/limits for data nodes | `requests: { cpu: "25m", memory: "1152Mi" }` |
| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `data.persistence.storageClass` | Persistent Volume Storage Class | `` |
| `data.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
| `data.persistence.size` | Persistent Volume Size | `8Gi` |
| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` |
| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `90` |
| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` |
| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` |
| `data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.enabled` | Enable ingest nodes | `false` |
| `ingest.name` | Ingest node pod name | `ingest` |
| `ingest.replicas` | Desired number of Elasticsearch ingest nodes | `2` |
| `ingest.heapSize` | Ingest node heap size | `128m` |
| `ingest.antiAffinity` | Ingest node pod anti-affinity policy | `soft` |
| `ingest.resources` | CPU/Memory resource requests/limits for ingest nodes pods | `requests: { cpu: "25m", memory: "256Mi" }` |
| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest nodes pod) | `true` |
| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest nodes pod) | `90` |
| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` |
| `ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` |
| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` |
| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest nodes pod) | `true` |
| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest nodes pod) | `90` |
| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` |
| `ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` |
| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` |
| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `metrics.enabled` | Enable prometheus exporter | `false` |
| `metrics.name` | Metrics pod name | `metrics` |
| `metrics.image.registry` | Metrics exporter image registry | `docker.io` |
| `metrics.image.repository` | Metrics exporter image repository | `bitnami/elasticsearch-exporter` |
| `metrics.image.tag` | Metrics exporter image tag | `latest` |
| `metrics.image.pullPolicy` | Metrics exporter image pull policy | `Always` |
| `metrics.service.type` | Metrics exporter endpoint service type | `ClusterIP` |
| `metrics.resources` | Metrics exporter resource requests/limit | `requests: { cpu: "25m" }` |
| `sysctlImage.registry` | Kernel settings modifier image registry | `docker.io` |
| `sysctlImage.repository` | Kernel settings modifier image repository | `busybox` |
| `sysctlImage.tag` | Kernel settings modifier image tag | `latest` |
| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `Always` |
| Parameter | Description | Default |
|---------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | Elasticsearch image registry | `docker.io` |
| `image.repository` | Elasticsearch image repository | `bitnami/elasticsearch` |
| `image.tag` | Elasticsearch image tag | `{VERSION}` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `image.pullSecrets` | Specify image pull secrets | `nil` |
| `name` | Elasticsearch cluster name | `elastic` |
| `config` | Elasticsearch node custom configuration | `` |
| `master.name` | Master-eligible node pod name | `master` |
| `master.replicas` | Desired number of Elasticsearch master-eligible nodes | `2` |
| `master.heapSize` | Master-eligible node heap size | `128m` |
| `master.antiAffinity` | Master-eligible node pod anti-affinity policy | `soft` |
| `master.service.type` | Kubernetes Service type (master-eligible nodes) | `ClusterIP` |
| `master.service.port` | Kubernetes Service port for Elasticsearch transport port (master-eligible nodes) | `9300` |
| `master.service.nodePort` | Kubernetes Service nodePort (master-eligible nodes) | `nil` |
| `master.service.annotations`                       | Annotations for master-eligible nodes service                                                                             | `{}`                                                     |
| `master.service.loadBalancerIP` | loadBalancerIP if master-eligible nodes service type is `LoadBalancer` | `nil` |
| `master.resources` | CPU/Memory resource requests/limits for master-eligible nodes pods | `requests: { cpu: "25m", memory: "256Mi" }` |
| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `90` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `securityContext.enabled` | Enable security context | `true` |
| `securityContext.fsGroup` | Group ID for the container | `1001` |
| `securityContext.runAsUser` | User ID for the container | `1001` |
| `discovery.name` | Discover node pod name | `discovery` |
| `coordinating.name` | Coordinating-only node pod name | `coordinating-only` |
| `coordinating.replicas` | Desired number of Elasticsearch coordinating-only nodes | `2` |
| `coordinating.heapSize` | Coordinating-only node heap size | `128m` |
| `coordinating.antiAffinity` | Coordinating-only node pod anti-affinity policy | `soft` |
| `coordinating.service.type` | Kubernetes Service type (coordinating-only nodes) | `ClusterIP` |
| `coordinating.service.port` | Kubernetes Service port for REST API (coordinating-only nodes) | `9200` |
| `coordinating.service.nodePort` | Kubernetes Service nodePort (coordinating-only nodes) | `nil` |
| `coordinating.service.annotations`                 | Annotations for coordinating-only nodes service                                                                           | `{}`                                                     |
| `coordinating.service.loadBalancerIP` | loadBalancerIP if coordinating-only nodes service type is `LoadBalancer` | `nil` |
| `coordinating.resources` | CPU/Memory resource requests/limits for coordinating-only nodes pods | `requests: { cpu: "25m", memory: "256Mi" }` |
| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe (coordinating-only nodes pod) | `true` |
| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` |
| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.name` | Data node pod name | `data` |
| `data.replicas` | Desired number of Elasticsearch data nodes nodes | `3` |
| `data.heapSize` | Data node heap size | `1024m` |
| `data.antiAffinity` | Data pod anti-affinity policy | `soft` |
| `data.resources` | CPU/Memory resource requests/limits for data nodes | `requests: { cpu: "25m", memory: "1152Mi" }` |
| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `data.persistence.storageClass` | Persistent Volume Storage Class | `` |
| `data.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
| `data.persistence.size` | Persistent Volume Size | `8Gi` |
| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` |
| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `90` |
| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` |
| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` |
| `data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.enabled` | Enable ingest nodes | `false` |
| `ingest.name` | Ingest node pod name | `ingest` |
| `ingest.replicas` | Desired number of Elasticsearch ingest nodes | `2` |
| `ingest.heapSize` | Ingest node heap size | `128m` |
| `ingest.antiAffinity` | Ingest node pod anti-affinity policy | `soft` |
| `ingest.service.type` | Kubernetes Service type (ingest nodes) | `ClusterIP` |
| `ingest.service.port`                              | Kubernetes Service port for Elasticsearch transport port (ingest nodes)                                                   | `9300`                                                   |
| `ingest.service.nodePort` | Kubernetes Service nodePort (ingest nodes) | `nil` |
| `ingest.service.annotations`                       | Annotations for ingest nodes service                                                                                      | `{}`                                                     |
| `ingest.service.loadBalancerIP` | loadBalancerIP if ingest nodes service type is `LoadBalancer` | `nil` |
| `ingest.resources` | CPU/Memory resource requests/limits for ingest nodes pods | `requests: { cpu: "25m", memory: "256Mi" }` |
| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest nodes pod) | `true` |
| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest nodes pod) | `90` |
| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` |
| `ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` |
| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` |
| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest nodes pod) | `true` |
| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest nodes pod) | `90` |
| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` |
| `ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` |
| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` |
| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `metrics.enabled` | Enable prometheus exporter | `false` |
| `metrics.name` | Metrics pod name | `metrics` |
| `metrics.image.registry` | Metrics exporter image registry | `docker.io` |
| `metrics.image.repository` | Metrics exporter image repository | `bitnami/elasticsearch-exporter` |
| `metrics.image.tag` | Metrics exporter image tag | `1.0.2` |
| `metrics.image.pullPolicy` | Metrics exporter image pull policy | `Always` |
| `metrics.service.type` | Metrics exporter endpoint service type | `ClusterIP` |
| `metrics.resources` | Metrics exporter resource requests/limit | `requests: { cpu: "25m" }` |
| `sysctlImage.enabled` | Enable kernel settings modifier image | `false` |
| `sysctlImage.registry` | Kernel settings modifier image registry | `docker.io` |
| `sysctlImage.repository` | Kernel settings modifier image repository | `bitnami/minideb` |
| `sysctlImage.tag` | Kernel settings modifier image tag | `latest` |
| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `Always` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
@@ -175,6 +190,21 @@ The [Bitnami Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsea
By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Configuration](#configuration) section to configure the PVC.
## Troubleshooting
Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
You can use a **privileged** initContainer to change those settings in the Kernel by enabling the `sysctlImage.enabled`:
```console
$ helm install --name my-release \
--set sysctlImage.enabled=true \
bitnami/elasticsearch
```
## Upgrading
### To 3.0.0

View File

@@ -11,6 +11,34 @@
suggest that you switch to "ClusterIP" or "NodePort".
-------------------------------------------------------------------------------
{{- end }}
{{- if not .Values.sysctlImage.enabled }}
-------------------------------------------------------------------------------
WARNING
Elasticsearch requires some changes in the kernel of the host machine to
work as expected. If those values are not set in the underlying operating
system, the ES containers fail to boot with ERROR messages.
To check whether the host machine meets the requirements, run the command
below:
kubectl logs --namespace {{ .Release.Namespace }} $(kubectl get --namespace {{ .Release.Namespace }} \
pods -l app={{ template "elasticsearch.name" . }},role=master -o jsonpath='{.items[0].metadata.name}') \
elasticsearch
You can adapt the Kernel parameters on your cluster as described in the
official documentation:
https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster
As an alternative, you can specify "sysctlImage.enabled=true" to use a
privileged initContainer to change those settings in the Kernel:
helm upgrade {{ .Release.Name }} bitnami/elasticsearch \
--set sysctlImage.enabled=true
{{- end }}
** Please be patient while the chart is being deployed **

View File

@@ -26,9 +26,23 @@ Create chart name and version as used by the chart label.
Return the proper ES image name
*/}}
{{- define "elasticsearch.image" -}}
{{- $registryName := default "docker.io" .Values.image.registry -}}
{{- $tag := default "latest" .Values.image.tag | toString -}}
{{- printf "%s/%s:%s" $registryName .Values.image.repository $tag -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*

View File

@@ -13,7 +13,7 @@ spec:
matchLabels:
app: {{ template "elasticsearch.name" . }}
release: "{{ .Release.Name }}"
role: "coordinating-only"
role: "coordinating-only"
replicas: {{ .Values.coordinating.replicas }}
template:
metadata:
@@ -55,15 +55,18 @@ spec:
- name: {{ . }}
{{- end}}
{{- end }}
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
initContainers:
- name: sysctl
image: {{ template "sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command: ["sysctl", "-w", "vm.max_map_count=262144"]
command: ['sh', '-c', 'install_packages systemd && sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536']
securityContext:
privileged: true
{{- end }}
containers:
- name: {{ template "elasticsearch.coordinating.fullname" . }}
- name: "elasticsearch"
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}

View File

@@ -8,12 +8,22 @@ metadata:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
role: "coordinating-only"
{{- with .Values.coordinating.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
type: {{ .Values.coordinating.service.type | quote }}
{{- if and (eq .Values.coordinating.service.type "LoadBalancer") .Values.coordinating.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.coordinating.service.loadBalancerIP }}
{{- end }}
ports:
- name: http
port: {{ .Values.coordinating.service.port }}
targetPort: http
{{- if .Values.coordinating.service.nodePort }}
nodePort: {{ .Values.coordinating.service.nodePort }}
{{- end }}
selector:
app: {{ template "elasticsearch.name" . }}
release: {{ .Release.Name | quote }}

View File

@@ -51,15 +51,18 @@ spec:
release: {{ .Release.Name | quote }}
role: "data"
{{- end }}
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
initContainers:
- name: sysctl
image: {{ template "sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command: ["sysctl", "-w", "vm.max_map_count=262144"]
command: ['sh', '-c', 'install_packages systemd && sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536']
securityContext:
privileged: true
{{- end }}
containers:
- name: {{ template "elasticsearch.data.fullname" . }}
- name: "elasticsearch"
image: {{ template "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.securityContext.enabled }}

View File

@@ -14,7 +14,7 @@ spec:
matchLabels:
app: {{ template "elasticsearch.name" . }}
release: "{{ .Release.Name }}"
role: "ingest"
role: "ingest"
replicas: {{ .Values.ingest.replicas }}
template:
metadata:
@@ -56,15 +56,18 @@ spec:
- name: {{ . }}
{{- end}}
{{- end }}
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
initContainers:
- name: sysctl
image: {{ template "sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command: ["sysctl", "-w", "vm.max_map_count=262144"]
command: ['sh', '-c', 'install_packages systemd && sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536']
securityContext:
privileged: true
{{- end }}
containers:
- name: {{ template "elasticsearch.ingest.fullname" . }}
- name: "elasticsearch"
image: {{ template "elasticsearch.image" . }}
{{- if .Values.securityContext.enabled }}
securityContext:

View File

@@ -9,11 +9,22 @@ metadata:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
role: "ingest"
{{- with .Values.ingest.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
type: {{ .Values.ingest.service.type | quote }}
{{- if and (eq .Values.ingest.service.type "LoadBalancer") .Values.ingest.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }}
{{- end }}
ports:
- name: transport
port: 9300
port: {{ .Values.ingest.service.port }}
targetPort: transport
{{- if .Values.ingest.service.nodePort }}
nodePort: {{ .Values.ingest.service.nodePort }}
{{- end }}
selector:
app: {{ template "elasticsearch.name" . }}
release: {{ .Release.Name | quote }}

View File

@@ -13,7 +13,7 @@ spec:
matchLabels:
app: {{ template "elasticsearch.name" . }}
release: "{{ .Release.Name }}"
role: "master"
role: "master"
replicas: {{ .Values.master.replicas }}
template:
metadata:
@@ -56,15 +56,18 @@ spec:
- name: {{ . }}
{{- end}}
{{- end }}
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
initContainers:
- name: sysctl
image: {{ template "sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command: ["sysctl", "-w", "vm.max_map_count=262144"]
command: ['sh', '-c', 'install_packages systemd && sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536']
securityContext:
privileged: true
{{- end }}
containers:
- name: {{ template "elasticsearch.master.fullname" . }}
- name: "elasticsearch"
image: {{ template "elasticsearch.image" . }}
{{- if .Values.securityContext.enabled }}
securityContext:

View File

@@ -8,11 +8,22 @@ metadata:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
role: "master"
{{- with .Values.master.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
type: {{ .Values.master.service.type | quote }}
{{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.master.service.loadBalancerIP }}
{{- end }}
ports:
- name: transport
port: 9300
port: {{ .Values.master.service.port }}
targetPort: transport
{{- if .Values.master.service.nodePort }}
nodePort: {{ .Values.master.service.nodePort }}
{{- end }}
selector:
app: {{ template "elasticsearch.name" . }}
release: {{ .Release.Name | quote }}

View File

@@ -34,7 +34,7 @@ spec:
- name: {{ template "elasticsearch.metrics.fullname" . }}
image: {{ template "metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
args: [ "-es.uri=http://{{ template "coordinating.fullname" . }}:{{ .Values.coordinating.service.port }}", "-es.all=true" ]
args: [ "-es.uri=http://{{ template "elasticsearch.coordinating.fullname" . }}:{{ .Values.coordinating.service.port }}", "-es.all=true" ]
ports:
- name: metrics
containerPort: 9108

View File

@@ -1,15 +1,21 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Elasticsearch image version
## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/
##
image:
registry: docker.io
repository: bitnami/elasticsearch
tag: 6.4.2-debian-9
tag: 6.5.1
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
@@ -33,6 +39,23 @@ master:
replicas: 3
heapSize: 128m
antiAffinity: "soft"
service:
## master-eligible service type
type: ClusterIP
## Elasticsearch transport port
port: 9300
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# loadBalancerIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
@@ -58,15 +81,17 @@ master:
failureThreshold: 5
## Image that performs the sysctl operation
##
sysctlImage:
enabled: false
registry: docker.io
repository: busybox
repository: bitnami/minideb
tag: latest
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
@@ -95,9 +120,22 @@ coordinating:
heapSize: 128m
antiAffinity: "soft"
service:
## coordinating-only service type
type: ClusterIP
## Externally accessible elasticsearch REST API port
## Elasticsearch REST API port
port: 9200
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# loadBalancerIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
@@ -189,6 +227,23 @@ ingest:
replicas: 2
heapSize: 128m
antiAffinity: "soft"
service:
## ingest service type
type: ClusterIP
## Elasticsearch transport port
port: 9300
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# loadBalancerIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
@@ -219,8 +274,8 @@ metrics:
image:
registry: docker.io
repository: bitnami/elasticsearch-exporter
tag: latest
pullPolicy: Always
tag: 1.0.2
pullPolicy: IfNotPresent
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9108"

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Elasticsearch image version
## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/
##
image:
registry: docker.io
repository: bitnami/elasticsearch
tag: 6.4.2-debian-9
tag: 6.5.1
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -18,9 +24,11 @@ image:
# - myRegistryKeySecretName
## Image that performs the sysctl operation
##
sysctlImage:
enabled: false
registry: docker.io
repository: busybox
repository: bitnami/minideb
tag: latest
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@@ -50,6 +58,26 @@ master:
replicas: 2
heapSize: 128m
antiAffinity: "soft"
service:
## master-eligible service type
type: ClusterIP
## Elasticsearch transport port
port: 9300
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# loadBalancerIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
@@ -95,9 +123,22 @@ coordinating:
heapSize: 128m
antiAffinity: "soft"
service:
## coordinating-only service type
type: ClusterIP
## Externally accessible elasticsearch REST API port
## Elasticsearch REST API port
port: 9200
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# loadBalancerIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
@@ -189,6 +230,23 @@ ingest:
replicas: 2
heapSize: 128m
antiAffinity: "soft"
service:
## ingest service type
type: ClusterIP
## Elasticsearch transport port
port: 9300
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# loadBalancerIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
@@ -219,7 +277,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/elasticsearch-exporter
tag: latest
tag: 1.0.2
pullPolicy: Always
annotations:
prometheus.io/scrape: "true"

View File

@@ -1,5 +1,5 @@
name: etcd
version: 1.1.6
version: 1.3.0
appVersion: 3.3.10
description: etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines
keywords:

View File

@@ -47,6 +47,7 @@ The following tables lists the configurable parameters of the etcd chart and the
| Parameter | Description | Default |
|---------------------------------------|----------------------------------------------------------------------------------------------------------|------------------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | etcd image registry | `docker.io` |
| `image.repository` | etcd Image name | `bitnami/etcd` |
| `image.tag` | etcd Image tag | `{VERSION}` |
@@ -101,6 +102,10 @@ The following tables lists the configurable parameters of the etcd chart and the
| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
| `podAnnotations` | Annotations to be added to pods | {} |
| `metrics.enabled` | Enable prometheus to access etcd metrics endpoint | `false` |
| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | {`prometheus.io/scrape: "true",prometheus.io/port: "2379"`} |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

View File

@@ -27,10 +27,23 @@ Return the proper etcd image name
*/}}
{{- define "etcd.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{- printf "%s/%s:%s" $registryName .Values.image.repository $tag -}}
{{- end -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper etcd peer protocol
@@ -81,4 +94,4 @@ Return the proper etcdctl authentication options
{{- if .Values.auth.client.enableAuthentication -}}
{{- printf "%s" $caOption -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -21,12 +21,6 @@ spec:
rollingUpdate:
partition: {{ .Values.statefulset.rollingUpdatePartition }}
{{- end }}
selector:
matchLabels:
app: {{ template "etcd.name" . }}
chart: {{ template "etcd.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
template:
metadata:
name: "{{ template "etcd.fullname" . }}"
@@ -35,6 +29,15 @@ spec:
chart: {{ template "etcd.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- if or .Values.podAnnotations .Values.metrics.enabled }}
annotations:
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
{{- if .Values.metrics.podAnnotations }}
{{ toYaml .Values.metrics.podAnnotations | indent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.image.pullSecrets }}
imagePullSecrets:

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami etcd image version
## ref: https://hub.docker.com/r/bitnami/etcd/tags/
##
image:
registry: docker.io
repository: bitnami/etcd
tag: 3.3.10-debian-9
tag: 3.3.10
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -165,4 +171,15 @@ readinessProbe:
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
successThreshold: 1
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
metrics:
enabled: true
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "2379"

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami etcd image version
## ref: https://hub.docker.com/r/bitnami/etcd/tags/
##
image:
registry: docker.io
repository: bitnami/etcd
tag: 3.3.10-debian-9
tag: 3.3.10
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -165,4 +171,15 @@ readinessProbe:
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
successThreshold: 1
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
metrics:
enabled: false
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "2379"

View File

@@ -1,6 +1,6 @@
name: external-dns
version: 1.0.4
appVersion: 0.5.7
version: 1.1.2
appVersion: 0.5.8
description: ExternalDNS is a Kubernetes addon that configures public DNS servers with information about exposed Kubernetes services to make them discoverable.
keywords:
- external-dns

View File

@@ -48,6 +48,7 @@ The following table lists the configurable parameters of the external-dns chart
| Parameter | Description | Default |
| ------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | ExternalDNS image registry | `docker.io` |
| `image.repository` | ExternalDNS Image name | `bitnami/external-dns` |
| `image.tag` | ExternalDNS Image tag | `{VERSION}` |
@@ -124,7 +125,23 @@ $ helm install --name my-release -f values.yaml bitnami/external-dns
Find information about the requirements for each DNS provider on the link below:
- [ExternalDNS Tutorials](https://github.com/kubernetes-incubator/external-dns/tree/master/docs/tutorials)
- [ExternalDNS Tutorials](https://github.com/kubernetes-incubator/external-dns/tree/master/docs/tutorials)
For instance, to install ExternalDNS on AWS, you need to:
- Provide the K8s worker node which runs the cluster autoscaler with a minimum IAM policy (check [IAM permissions docs](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/aws.md#iam-permissions) for more information).
- Setup a hosted zone on Route53 and annotate the Hosted Zone ID and its associated "nameservers" as described on [these docs](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/aws.md#set-up-a-hosted-zone).
- Install ExternalDNS chart using the command below:
> Note: replace the placeholder HOSTED_ZONE_NAME with your hosted zoned name.
```bash
$ helm install --name my-release \
--set provider=aws \
--set aws.zoneType=public \
--set domainFilters=HOSTED_ZONE_NAME \
bitnami/external-dns
```
## Upgrading

View File

@@ -27,8 +27,22 @@ Return the proper external-dns image name
*/}}
{{- define "external-dns.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{- printf "%s/%s:%s" $registryName .Values.image.repository $tag -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami external-dns image version
## ref: https://hub.docker.com/r/bitnami/external-dns/tags/
##
image:
registry: docker.io
repository: bitnami/external-dns
tag: 0.5.7-debian-9
tag: 0.5.8
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -117,7 +123,7 @@ rbac:
##
serviceAccountName: default
## RBAC API version
apiVersion: v1beta1
apiVersion: v1beta1
## Kubernetes Security Context
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/

View File

@@ -1,6 +1,6 @@
name: jenkins
version: 1.1.3
appVersion: 2.138.2
version: 1.2.2
appVersion: 2.138.3
description: The leading open source automation server
keywords:
- jenkins

View File

@@ -47,6 +47,7 @@ The following tables lists the configurable parameters of the Jenkins chart and
| Parameter | Description | Default |
|----------------------------|----------------------------------------|---------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | Jenkins image registry | `docker.io` |
| `image.repository` | Jenkins Image name | `bitnami/jenkins` |
| `image.tag` | Jenkins Image tag | `{VERSION}` |

View File

@@ -14,3 +14,26 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the proper Jenkins image name
*/}}
{{- define "jenkins.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}

View File

@@ -27,7 +27,7 @@ spec:
{{- end }}
containers:
- name: {{ template "fullname" . }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: "{{ template "jenkins.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
env:
- name: JENKINS_USERNAME

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Jenkins image version
## ref: https://hub.docker.com/r/bitnami/jenkins/tags/
##
image:
registry: docker.io
repository: bitnami/jenkins
tag: 2.138.2-debian-9
tag: 2.138.3
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images

View File

@@ -1,6 +1,6 @@
name: kafka
version: 1.0.2
appVersion: 2.0.0
version: 1.1.3
appVersion: 2.0.1
description: Apache Kafka is a distributed streaming platform.
keywords:
- kafka

View File

@@ -47,6 +47,7 @@ The following tables lists the configurable parameters of the Kafka chart and th
| Parameter | Description | Default |
|----------------------------------|------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | Kafka image registry | `docker.io` |
| `image.repository` | Kafka Image name | `bitnami/kafka` |
| `image.tag` | Kafka Image tag | `{VERSION}` |
@@ -57,8 +58,8 @@ The following tables lists the configurable parameters of the Kafka chart and th
| `replicaCount` | Number of Kafka nodes | `1` |
| `config` | Configuration file for Kafka | `nil` |
| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` |
| `listeners` | The address the socket server listens on. | `nil` |
| `advertisedListeners` | Hostname and port the broker will advertise to producers and consumers. | `nil` |
| `listeners` | The address the socket server listens on. | `nil` |
| `advertisedListeners` | Hostname and port the broker will advertise to producers and consumers. | `nil` |
| `brokerId` | ID of the Kafka node | `-1` |
| `deleteTopicEnable` | Switch to enable topic deletion or not. | `false` |
| `heapOpts` | Kafka's Java Heap size. | `-Xmx1024m -Xms1024m` |

View File

@@ -1,6 +1,6 @@
dependencies:
- name: zookeeper
repository: https://charts.bitnami.com/bitnami
version: 0.0.1
digest: sha256:5de3747cef6f8cc34f166277b81f8ccea2c8849caadf133c1d9bc7da5fe71607
generated: 2018-06-14T16:25:45.420195607+02:00
version: 1.1.0
digest: sha256:eacfe6cdc916044d9b3594d6a9b2f265593c50f41565df0689f6e0f612d9dec6
generated: 2018-10-16T08:55:34.669048+02:00

View File

@@ -1,5 +1,5 @@
dependencies:
- name: zookeeper
version: 0.x.x
version: 1.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled

View File

@@ -27,8 +27,22 @@ Return the proper Kafka image name
*/}}
{{- define "kafka.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{- printf "%s/%s:%s" $registryName .Values.image.repository $tag -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
@@ -55,5 +69,5 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
*/}}
{{- define "kafka.zookeeper.fullname" -}}
{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- printf "%s-%s" .Release.Name $name | trunc 24 | trimSuffix "-" -}}
{{- end -}}

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Kafka image version
## ref: https://hub.docker.com/r/bitnami/kafka/tags/
##
image:
registry: docker.io
repository: bitnami/kafka
tag: 2.0.0-debian-9
tag: 2.0.1
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -319,6 +325,11 @@ metrics:
- kafka.network:*
- kafka.log:*
##
## Zookeeper chart configuration
##
## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
##
zookeeper:
enabled: true

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Kafka image version
## ref: https://hub.docker.com/r/bitnami/kafka/tags/
##
image:
registry: docker.io
repository: bitnami/kafka
tag: 2.0.0-debian-9
tag: 2.0.1
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -319,6 +325,11 @@ metrics:
- kafka.network:*
- kafka.log:*
##
## Zookeeper chart configuration
##
## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
##
zookeeper:
enabled: true

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: kubeapps
version: 0.6.1
appVersion: v1.0.0-beta.2
version: 1.0.0
appVersion: v1.0.0
description: Kubeapps is a dashboard for your Kubernetes cluster that makes it easy to deploy and manage applications in your cluster using Helm
icon: https://raw.githubusercontent.com/kubeapps/kubeapps/master/docs/img/logo.png
keywords:
@@ -17,3 +17,5 @@ maintainers:
email: containers@bitnami.com
- name: prydonius
email: adnan@bitnami.com
# 2.10+ required because we need the install-crd hook type
tillerVersion: ">=2.10.0"

View File

@@ -27,7 +27,7 @@ It also packages the [Bitnami MongoDB chart](https://github.com/helm/charts/tree
## Prerequisites
- Kubernetes 1.8+ (tested with Azure Kubernetes Service, Google Kubernetes Engine, minikube and Docker for Desktop Kubernetes)
- Helm 2.9.1+
- Helm 2.10.0+
- PV provisioner support in the underlying infrastructure
- Administrative access to the cluster to create and update RBAC ClusterRoles
@@ -115,6 +115,7 @@ apprepository:
EOF
$ helm install --name kubeapps --namespace kubeapps bitnami/kubeapps -f custom-values.yaml
```
### Configuring connection to a custom namespace Tiller instance
By default, Kubeapps connects to the Tiller Service in the `kube-system` namespace, the default install location for Helm.
@@ -170,7 +171,7 @@ To enable ingress integration, please set `ingress.enabled` to `true`
##### Hosts
Most likely you will only want to have one hostname that maps to this Kubeapps installation, however, it is possible to have more than one host. To facilitate this, the `ingress.hosts` object is an array.
Most likely you will only want to have one hostname that maps to this Kubeapps installation, however, it is possible to have more than one host. To facilitate this, the `ingress.hosts` object is an array.
##### Annotations
@@ -178,7 +179,18 @@ For annotations, please see [this document](https://github.com/kubernetes/ingres
##### TLS
TLS can be configured using the `ingress.tls` object in the same format that the Kubernetes Ingress requests. Please see [this example](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tls) for more information.
TLS can be configured using setting the `ingress.hosts[].tls` boolean of the corresponding hostname to true, then you can choose the TLS secret name setting `ingress.hosts[].tlsSecret`. Please see [this example](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tls) for more information.
You can provide your own certificates using the `ingress.secrets` object. If your cluster has a [cert-manager](https://github.com/jetstack/cert-manager) add-on to automate the management and issuance of TLS certificates, set `ingress.hosts[].certManager` boolean to true to enable the corresponding annotations for cert-manager as shown in the example below:
```console
helm install --name kubeapps --namespace kubeapps bitnami/kubeapps \
--set ingress.enabled=true \
--set ingress.certManager=true \
--set ingress.hosts[0].name=kubeapps.custom.domain \
--set ingress.hosts[0].tls=true \
--set ingress.hosts[0].tlsSecret=kubeapps-tls
```
## Troubleshooting
@@ -190,7 +202,15 @@ If during installation you run into an error similar to:
Error: release kubeapps failed: clusterroles.rbac.authorization.k8s.io "kubeapps-apprepository-controller" is forbidden: attempt to grant extra privileges: [{[get] [batch] [cronjobs] [] []...
```
It is possible that your cluster does not have Role Based Access Control (RBAC) fully configured. In which case you should perform the chart installation by setting `rbac.create=false`:
This usually is an indication that Tiller was not installed with enough permissions to create the resources required by Kubeapps. In order to install Kubeapps, you will need to install Tiller with elevated permissions (e.g. as a cluster-admin). For example:
```
kubectl -n kube-system create sa tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
```
It is also possible, though less common, that your cluster does not have Role Based Access Control (RBAC) enabled. If this is the case you should perform the chart installation by setting `rbac.create=false`:
```console
$ helm install --name kubeapps --namespace kubeapps bitnami/kubeapps --set rbac.create=false

View File

@@ -1,6 +1,6 @@
dependencies:
- name: mongodb
repository: https://kubernetes-charts.storage.googleapis.com
version: 4.3.7
version: 4.9.0
digest: sha256:415440e73af7d4b02a10a15f28bb2fc095cbdffdc2e1676d76e0f0eaa1632d50
generated: 2018-09-28T12:48:52.725939266+02:00
generated: 2018-11-14T11:54:24.338893216Z

View File

@@ -14,11 +14,11 @@ To access Kubeapps from outside your K8s cluster, follow the steps below:
1. Get the Kubeapps URL and associate Kubeapps hostname to your cluster external IP:
{{- range .Values.ingress.hosts }}
export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters
echo "Kubeapps URL: http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}"
echo "$CLUSTER_IP {{ . }}" | sudo tee -a /etc/hosts
{{- end }}
{{- range .Values.ingress.hosts }}
echo "Kubeapps URL: http{{ if .tls }}s{{ end }}://{{ .name }}{{ default "/" .path }}"
echo "$CLUSTER_IP {{ .name }}" | sudo tee -a /etc/hosts
{{- end }}
{{- else }}

View File

@@ -28,7 +28,22 @@ If release name contains chart name it will be used as a full name.
Render image reference
*/}}
{{- define "kubeapps.image" -}}
{{ .registry }}/{{ .repository }}:{{ .tag }}
{{- $image := index . 0 -}}
{{- $global := index . 1 -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if $global -}}
{{- if $global.imageRegistry -}}
{{ $global.imageRegistry }}/{{ $image.repository }}:{{ $image.tag }}
{{- else -}}
{{ $image.registry }}/{{ $image.repository }}:{{ $image.tag }}
{{- end -}}
{{- else -}}
{{ $image.registry }}/{{ $image.repository }}:{{ $image.tag }}
{{- end -}}
{{- end -}}
{{/*

View File

@@ -0,0 +1,17 @@
{{- range .Values.apprepository.initialRepos }}
apiVersion: kubeapps.com/v1alpha1
kind: AppRepository
metadata:
name: {{ .name }}
labels:
app: {{ template "kubeapps.apprepository.fullname" $ }}
chart: {{ template "kubeapps.chart" $ }}
release: {{ $.Release.Name }}
heritage: {{ $.Release.Service }}
annotations:
helm.sh/hook: pre-install
spec:
type: helm
url: {{ .url }}
---
{{ end -}}

View File

@@ -6,7 +6,7 @@ kind: CustomResourceDefinition
metadata:
name: apprepositories.kubeapps.com
annotations:
"helm.sh/resource-policy": keep
"helm.sh/hook": crd-install
labels:
app: {{ template "kubeapps.apprepository.fullname" . }}
chart: {{ template "kubeapps.chart" . }}

View File

@@ -22,12 +22,13 @@ spec:
serviceAccountName: {{ template "kubeapps.apprepository.fullname" . }}
containers:
- name: controller
image: {{ template "kubeapps.image" .Values.apprepository.image }}
image: {{ template "kubeapps.image" (list .Values.apprepository.image .Values.global) }}
command:
- /apprepository-controller
args:
- --logtostderr
- --repo-sync-image={{ template "kubeapps.image" .Values.apprepository.syncImage }}
- --user-agent-comment=kubeapps/{{ .Chart.AppVersion }}
- --repo-sync-image={{ template "kubeapps.image" (list .Values.apprepository.syncImage .Values.global) }}
- --namespace={{ .Release.Namespace }}
- --mongo-url={{ template "kubeapps.mongodb.fullname" . }}
- --mongo-secret-name={{ .Values.mongodb.existingSecret }}

View File

@@ -1,30 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
annotations:
helm.sh/hook: post-install
helm.sh/hook-delete-policy: hook-succeeded
helm.sh/hook-weight: "-10"
labels:
app: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
apprepositories.yaml: |-
{{- range .Values.apprepository.initialRepos }}
apiVersion: kubeapps.com/v1alpha1
kind: AppRepository
metadata:
name: {{ .name }}
labels:
app: {{ template "kubeapps.apprepository.fullname" $ }}
chart: {{ template "kubeapps.chart" $ }}
release: {{ $.Release.Name }}
heritage: {{ $.Release.Service }}
spec:
type: helm
url: {{ .url }}
---
{{ end -}}

View File

@@ -1,46 +0,0 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
annotations:
helm.sh/hook: post-install
helm.sh/hook-delete-policy: hook-succeeded
helm.sh/hook-weight: "-10"
labels:
app: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- kubeapps.com
resources:
- apprepositories
verbs:
- get
- create
- delete
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
annotations:
helm.sh/hook: post-install
helm.sh/hook-delete-policy: hook-succeeded
helm.sh/hook-weight: "-10"
labels:
app: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
annotations:
helm.sh/hook: post-install
helm.sh/hook-delete-policy: hook-succeeded
helm.sh/hook-weight: "-10"
labels:
app: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}

View File

@@ -1,48 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
annotations:
helm.sh/hook: post-install
helm.sh/hook-delete-policy: hook-succeeded
labels:
app: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
template:
metadata:
labels:
app: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: kubectl
image: {{ template "kubeapps.image" .Values.hooks.image }}
command:
- kubectl
- apply
- -f
- /tmp/apprepositories/apprepositories.yaml
volumeMounts:
- mountPath: /tmp/apprepositories
name: apprepositories-config
volumes:
- name: apprepositories-config
configMap:
name: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
restartPolicy: OnFailure
serviceAccountName: {{ template "kubeapps.apprepository-jobs-bootstrap.fullname" . }}
{{- with .Values.hooks.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.hooks.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with.Values.hooks.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@@ -20,13 +20,13 @@ spec:
spec:
containers:
- name: kubectl
image: {{ template "kubeapps.image" .Values.hooks.image }}
image: {{ template "kubeapps.image" (list .Values.hooks.image .Values.global) }}
command:
- kubectl
- delete
- apprepositories.kubeapps.com
- -n
- {{ .Release.Namespace }}
- {{ .Release.Namespace }}
- --all
restartPolicy: OnFailure
serviceAccountName: {{ template "kubeapps.apprepository-jobs-cleanup.fullname" . }}

View File

@@ -1,44 +1,4 @@
{{- if .Values.rbac.create -}}
# Need a cluster role because client-go v5.0.1 does not support namespaced
# informers
# TODO: remove when we update to client-go v6.0.0
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ template "kubeapps.apprepository.fullname" . }}
labels:
app: {{ template "kubeapps.apprepository.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- batch
resources:
- cronjobs
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ template "kubeapps.apprepository.fullname" . }}
labels:
app: {{ template "kubeapps.apprepository.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kubeapps.apprepository.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kubeapps.apprepository.fullname" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:

View File

@@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: chartsvc
image: {{ template "kubeapps.image" .Values.chartsvc.image }}
image: {{ template "kubeapps.image" (list .Values.chartsvc.image .Values.global) }}
command:
- /chartsvc
args:

View File

@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: dashboard
image: {{ template "kubeapps.image" .Values.dashboard.image }}
image: {{ template "kubeapps.image" (list .Values.dashboard.image .Values.global) }}
livenessProbe:
{{ toYaml .Values.dashboard.livenessProbe | indent 10 }}
readinessProbe:

View File

@@ -1,38 +1,37 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "kubeapps.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
name: {{ template "kubeapps.fullname" . }}
labels:
app: {{ include "kubeapps.name" . }}
chart: {{ include "kubeapps.chart" . }}
app: {{ template "kubeapps.name" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- if .Values.ingress.certManager }}
kubernetes.io/tls-acme: "true"
{{- end }}
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
- host: {{ .name }}
http:
paths:
- path: {{ default "/" .path }}
backend:
serviceName: {{ template "kubeapps.fullname" $ }}
servicePort: http
{{- end }}
tls:
{{- range .Values.ingress.hosts }}
{{- if .tls }}
- hosts:
- {{ .name }}
secretName: {{ .tlsSecret }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -38,6 +38,12 @@ data:
# Hide Www-Authenticate to prevent it triggering a basic auth prompt in
# the browser with some clusters
proxy_hide_header Www-Authenticate;
# Keep the connection open with the API server even if idle (the default is 60 seconds)
# Setting it to 1 hour which should be enough for our current use case of deploying/upgrading apps
# If we enable other use-cases in the future we might need to bump this value
# More info here https://github.com/kubeapps/kubeapps/issues/766
proxy_read_timeout 1h;
}
location /api/chartsvc {

View File

@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: nginx
image: {{ template "kubeapps.image" .Values.frontend.image }}
image: {{ template "kubeapps.image" (list .Values.frontend.image .Values.global) }}
livenessProbe:
{{ toYaml .Values.frontend.livenessProbe | indent 10 }}
readinessProbe:

View File

@@ -1,57 +0,0 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: hook-succeeded
helm.sh/hook-weight: "-10"
labels:
app: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- kubeapps.com
resources:
- apprepositories
verbs:
- get
- create
- patch
- delete
- apiGroups:
- ""
resources:
- secrets
- pods
verbs:
- get
- list
- create
- delete
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: hook-succeeded
helm.sh/hook-weight: "-10"
labels:
app: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: hook-succeeded
helm.sh/hook-weight: "-10"
labels:
app: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}

View File

@@ -1,58 +0,0 @@
# This is a temporary upgrade hook for installating a version prior
# to 0.6 since the process of creating the mongodb secret changed
# It should be removed at https://github.com/kubeapps/kubeapps/issues/699
# This Job creates the mongodb credentials secret if it doesn't exists
# and it resyncs the existing apprepositories.
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: hook-succeeded
labels:
app: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
template:
metadata:
labels:
app: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: kubectl
image: {{ template "kubeapps.image" .Values.hooks.image }}
command:
- /bin/sh
- -c
args:
- |-
# Recreate MongoDB secret if doesn't exists (happens for chart versions < 0.5.2)
if kubectl get secrets -n {{ .Release.Namespace }} {{ .Values.mongodb.existingSecret }}; then
# Secret exists, do nothing
echo "MongoDB secret found"
else
# Secret doesn't exists, create it and restart mongodb
kubectl create secret generic -n {{ .Release.Namespace }} {{ .Values.mongodb.existingSecret }} --from-literal mongodb-root-password=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
kubectl delete pods -n {{ .Release.Namespace }} -l app=mongodb,release={{ .Release.Name }}
kubectl rollout status -n {{ .Release.Namespace }} deployment/{{ .Release.Name }}-mongodb
# Re-sync repositories reseting resyncRequests
kubectl get apprepositories -o=name -n kubeapps {{ .Release.Namespace }} | xargs kubectl patch $1 -n {{ .Release.Namespace }} --type merge -p '{ "spec": { "resyncRequests": 0 } }'
fi
restartPolicy: OnFailure
serviceAccountName: {{ template "kubeapps.kubeapps-jobs-upgrade.fullname" . }}
{{- with .Values.hooks.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.hooks.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.hooks.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@@ -19,7 +19,7 @@ spec:
spec:
containers:
- name: kubectl
image: {{ template "kubeapps.image" .Values.hooks.image }}
image: {{ template "kubeapps.image" (list .Values.hooks.image .Values.global) }}
command:
- /bin/sh
args:

View File

@@ -22,11 +22,12 @@ spec:
serviceAccountName: {{ template "kubeapps.tiller-proxy.fullname" . }}
containers:
- name: proxy
image: {{ template "kubeapps.image" .Values.tillerProxy.image }}
image: {{ template "kubeapps.image" (list .Values.tillerProxy.image .Values.global) }}
command:
- /proxy
args:
- --host={{ .Values.tillerProxy.host }}
- --user-agent-comment=kubeapps/{{ .Chart.AppVersion }}
{{- if .Values.tillerProxy.tls }}
- --tls
{{- if .Values.tillerProxy.tls.verify }}
@@ -53,7 +54,7 @@ spec:
{{- if .Values.tillerProxy.tls }}
volumes:
- name: tiller-certs
secret:
secret:
secretName: {{ template "kubeapps.tiller-proxy.fullname" . }}
{{- end }}
{{- with .Values.tillerProxy.nodeSelector }}

View File

@@ -0,0 +1,17 @@
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .name }}
labels:
app: {{ template "kubeapps.name" $ }}
chart: {{ template "kubeapps.chart" $ }}
release: {{ $.Release.Name }}
heritage: {{ $.Release.Service }}
type: kubernetes.io/tls
data:
tls.crt: {{ .certificate | b64enc }}
tls.key: {{ .key | b64enc }}
{{- end }}
{{- end }}

View File

@@ -1,16 +1,53 @@
## Global Docker image registry
## Please note that this will override the image registry for all the images, including dependencies, configured to use the global value
# global:
# imageRegistry:
# The frontend service is the main reverse proxy used to access the Kubeapps UI
# To expose Kubeapps externally either configure the ingress object below or
# set frontend.service.type=LoadBalancer in the frontend configuration.
# ref: http://kubernetes.io/docs/user-guide/ingress/
#
ingress:
# Set to true to enable ingress record generation
enabled: false
annotations: {}
path: /
# Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
# Ingress annotations done as key:value pairs
# For a full list of possible ingress annotations, please see
# ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
#
# If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
# The list of hostnames to be covered with this ingress record.
# Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- kubeapps.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
- name: kubeapps.local
path: /
# Set this to true in order to enable TLS on the ingress record
tls: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: kubeapps.local-tls
secrets:
# If you're providing your own certificates, please use this to add the certificates as secrets
# key and certificate should start with -----BEGIN CERTIFICATE----- or
# -----BEGIN RSA PRIVATE KEY-----
#
# name should line up with a tlsSecret set further up
# If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
#
# It is also possible to create and manage the certificates outside of this helm chart
# Please see README.md for more information
# - name: kubeapps.local-tls
# key:
# certificate:
frontend:
replicaCount: 2
@@ -55,12 +92,12 @@ apprepository:
image:
registry: docker.io
repository: kubeapps/apprepository-controller
tag: v1.0.0-beta.2
tag: v1.0.0
# Image used to perform chart repository syncs
syncImage:
registry: docker.io
repository: kubeapps/chart-repo
tag: v1.0.0-beta.2
registry: quay.io
repository: helmpack/chart-repo
tag: v1.0.2
initialRepos:
- name: stable
url: https://kubernetes-charts.storage.googleapis.com
@@ -101,7 +138,7 @@ tillerProxy:
image:
registry: docker.io
repository: kubeapps/tiller-proxy
tag: v1.0.0-beta.2
tag: v1.0.0
service:
port: 8080
host: tiller-deploy.kube-system:44134
@@ -126,9 +163,9 @@ tillerProxy:
chartsvc:
replicaCount: 2
image:
registry: docker.io
repository: kubeapps/chartsvc
tag: v1.0.0-beta.2
registry: quay.io
repository: helmpack/chartsvc
tag: v1.0.2
service:
port: 8080
# https://github.com/kubeapps/kubeapps/issues/478#issuecomment-422979262
@@ -163,7 +200,7 @@ dashboard:
image:
registry: docker.io
repository: kubeapps/dashboard
tag: v1.0.0-beta.2
tag: v1.0.0
service:
port: 8080
livenessProbe:
@@ -190,6 +227,11 @@ dashboard:
tolerations: []
affinity: {}
##
## MongoDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mongodb/values.yaml
##
mongodb:
# Kubeapps uses MongoDB as a cache and persistence is not required
persistence:

View File

@@ -1,6 +1,6 @@
name: mean
version: 4.1.0
appVersion: 3.6.4
version: 4.2.3
appVersion: 4.6.2
description: MEAN is a free and open-source JavaScript software stack for building dynamic web sites and web applications. The MEAN stack is MongoDB, Express.js, Angular, and Node.js. Because all components of the MEAN stack support programs written in JavaScript, MEAN applications can be written in one language for both server-side and client-side execution environments.
keywords:
- node

View File

@@ -51,15 +51,16 @@ The following table lists the configurable parameters of the MEAN chart and thei
| Parameter | Description | Default |
|-----------------------------------------|-----------------------------------------------------------|-----------------------------------------------------------|
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | NodeJS image registry | `docker.io` |
| `image.repository` | NodeJS Image name | `bitnami/node` |
| `image.tag` | NodeJS Image tag | `{VERSION}` |
| `image.repository` | NodeJS image name | `bitnami/node` |
| `image.tag` | NodeJS image tag | `{VERSION}` |
| `image.pullPolicy` | NodeJS image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
| `gitImage.registry` | Git image registry | `docker.io` |
| `gitImage.repository` | Git Image name | `alpine/git` |
| `gitImage.tag` | Git Image tag | `latest` |
| `gitImage.pullPolicy` | Git image pull policy | IfNotPresent` |
| `git.registry` | Git image registry | `docker.io` |
| `git.repository` | Git image name | `bitnami/git` |
| `git.tag` | Git image tag | `latest` |
| `git.pullPolicy`                        | Git image pull policy                                     | `IfNotPresent`                                            |
| `repository` | Repo of the application | `https://github.com/bitnami/sample-mean.git` |
| `revision` | Revision to checkout | `master` |
| `replicas` | Number of replicas for the application | `1` |
@@ -217,7 +218,7 @@ ingress:
4. Deploy the helm chart:
```
$ helm install --name node-app --set mongodb.install=false,externaldb.broker.serviceInstanceName=azure-mongodb-instance bitnami/mean
$ helm install --name node-app --set mongodb.install=false,externaldb.broker.serviceInstanceName=azure-mongodb-instance,externaldb.ssl=true bitnami/mean
```
Once the instance has been provisioned in Azure, a new secret should have been automatically created with the connection parameters for your application.

View File

@@ -1,9 +1,9 @@
dependencies:
- name: mongodb
repository: https://kubernetes-charts.storage.googleapis.com/
version: 4.2.3
version: 4.6.2
- name: bitnami-common
repository: https://charts.bitnami.com/bitnami
version: 0.0.3
digest: sha256:e08b8d1bb8197aa8fdc27536aaa1de2e7de210515a451ebe94949a3db55264dd
generated: 2018-09-05T14:56:00.449083032+02:00
generated: 2018-10-25T11:06:24.877576+02:00

View File

@@ -40,3 +40,49 @@ Custom template to get proper service name
{{- printf "%s-%s" .Release.Name "mongodb-binding" | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end -}}
{{/*
Return the proper MEAN image name
*/}}
{{- define "mean.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper Git image name
*/}}
{{- define "git.image" -}}
{{- $registryName := .Values.git.registry -}}
{{- $repositoryName := .Values.git.repository -}}
{{- $tag := .Values.git.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}

View File

@@ -28,14 +28,14 @@ spec:
{{- end }}
initContainers:
- name: git-clone-repository
image: "{{ .Values.gitImage.registry }}/{{ .Values.gitImage.repository }}:{{ .Values.gitImage.tag }}"
imagePullPolicy: {{ .Values.gitImage.pullPolicy | quote }}
image: "{{ template "git.image" . }}"
imagePullPolicy: {{ .Values.git.pullPolicy | quote }}
command: [ '/bin/sh', '-c' , 'git clone {{ .Values.repository }} /app && cd /app && git checkout {{ .Values.revision }}']
volumeMounts:
- name: app
mountPath: /app
- name: npm-install
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: "{{ template "mean.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
workingDir: /app
command: ['/bin/bash', '-c', 'useradd bitnami && chown -R bitnami:bitnami /app && npm install']
@@ -44,7 +44,7 @@ spec:
mountPath: /app
containers:
- name: {{ template "mean.fullname" . }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: "{{ template "mean.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
env:
{{- if .Values.mongodb.install }}

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami node image version
## ref: https://hub.docker.com/r/bitnami/node/tags/
##
image:
registry: docker.io
repository: bitnami/node
tag: 9.11.1-prod
tag: 8.12.0-prod
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -17,9 +23,9 @@ image:
# pullSecrets:
# - myRegistrKeySecretName
gitImage:
git:
registry: docker.io
repository: alpine/git
repository: bitnami/git
tag: latest
pullPolicy: IfNotPresent
@@ -84,6 +90,8 @@ resources: {}
##
## MongoDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mongodb/values.yaml
##
mongodb:
## MongoDB admin password
## ref: https://github.com/bitnami/bitnami-docker-MongoDB/blob/master/README.md#setting-the-root-password-on-first-run

View File

@@ -1,6 +1,6 @@
name: memcached
version: 1.2.0
appVersion: 1.5.11
version: 1.3.0
appVersion: 1.5.12
description: Chart for Memcached
keywords:
- memcached

View File

@@ -47,6 +47,7 @@ The following tables lists the configurable parameters of the Memcached chart an
| Parameter | Description | Default |
|-----------------------------|-------------------------------------|---------------------------------------------------------- |
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | Memcached image registry | `docker.io` |
| `image.repository`          | Memcached image name                | `bitnami/memcached`                                        |
| `image.tag`                 | Memcached image tag                 | `{VERSION}`                                                |

View File

@@ -24,3 +24,25 @@ Return the proper image name (for the metrics image)
{{- $tag := .Values.metrics.image.tag | toString -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{/*
Return the proper Memcached image name
*/}}
{{- define "memcached.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}

View File

@@ -41,7 +41,7 @@ spec:
{{- end }}
containers:
- name: {{ template "fullname" . }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: "{{ template "memcached.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
env:
- name: MEMCACHED_USERNAME

View File

@@ -1,10 +1,16 @@
## Global Docker image registry
## Please note that this will override the image registry for all the images, including dependencies, configured to use the global value
##
# global:
# imageRegistry:
## Bitnami Memcached image version
## ref: https://hub.docker.com/r/bitnami/memcached/tags/
##
image:
registry: docker.io
repository: bitnami/memcached
tag: 1.5.11-debian-9
tag: 1.5.12
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images

View File

@@ -1,5 +1,5 @@
name: metrics-server
version: 2.0.5
version: 2.1.1
appVersion: 0.3.1
description: Metrics Server is a cluster-wide aggregator of resource usage data. Metrics Server collects metrics from the Summary API, exposed by Kubelet on each node.
keywords:

View File

@@ -47,6 +47,7 @@ The following tables lists the configurable parameters of the Metrics Server cha
| Parameter | Description | Default |
|--------------------------|-----------------------------------------------------------------------------|----------------------------------------|
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `image.registry` | Metrics Server image registry | `docker.io` |
| `image.repository` | Metrics Server image name | `bitnami/metrics-server` |
| `image.tag` | Metrics Server image tag | `{VERSION}` |

Some files were not shown because too many files have changed in this diff Show More