diff --git a/upstreamed/nats/.helmignore b/upstreamed/nats/.helmignore new file mode 100644 index 0000000000..f0c1319444 --- /dev/null +++ b/upstreamed/nats/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/upstreamed/nats/Chart.yaml b/upstreamed/nats/Chart.yaml new file mode 100644 index 0000000000..a4594f647d --- /dev/null +++ b/upstreamed/nats/Chart.yaml @@ -0,0 +1,17 @@ +name: nats +version: 0.0.1 +appVersion: 1.1.0 +description: An open-source, cloud-native messaging system +keywords: +- nats +- messaging +- addresing +- discovery +home: https://nats.io/ +sources: +- https://github.com/bitnami/bitnami-docker-nats +maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png diff --git a/upstreamed/nats/OWNERS b/upstreamed/nats/OWNERS new file mode 100644 index 0000000000..2c3e9fa1a4 --- /dev/null +++ b/upstreamed/nats/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/upstreamed/nats/README.md b/upstreamed/nats/README.md new file mode 100644 index 0000000000..ee32830a3d --- /dev/null +++ b/upstreamed/nats/README.md @@ -0,0 +1,149 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. 
+ +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +|--------------------------------------|----------------------------------------------------------------------------------------------|-----------------------------------| +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. 
payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `updateStrategy` | Replicaset Update strategy | `OnDelete` | +| `rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` | +| `clientService.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `clientService.port` | NATS client port | `4222` | +| `clientService.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `clientService.annotations` | Annotations for NATS client service | {} | +| `clientService.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `clusterService.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `clusterService.port` | NATS cluster port | `6222` | +| `clusterService.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `clusterService.annotations` | Annotations for NATS cluster service | {} | +| `clusterService.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `monitoringService.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoringService.port` | NATS monitoring port | `8222` | +| `monitoringService.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoringService.annotations` | Annotations for NATS monitoring service | {} | +| `monitoringService.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` 
| +| `networkPolicy.allowExternal` | Allow external connections | `true` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. 
+ +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` diff --git a/upstreamed/nats/templates/NOTES.txt b/upstreamed/nats/templates/NOTES.txt new file mode 100644 index 0000000000..415bcd31db --- /dev/null +++ b/upstreamed/nats/templates/NOTES.txt @@ -0,0 +1,77 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.clientService.type "LoadBalancer") (contains .Values.clientService.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "clientService.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.clientService.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . 
}} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoringService.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoringService.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" +{{- else if contains "LoadBalancer" .Values.monitoringService.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . 
}}-monitoring -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo "Monitoring URL: http://$SERVICE_IP/" +{{- else if contains "ClusterIP" .Values.monitoringService.type }} + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats.name" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoringService.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.monitoringService.port }}:{{ .Values.monitoringService.port }} +{{- end }} + +2. Access the NATS monitoring opening the URL obtained on a browser. +{{- end }} diff --git a/upstreamed/nats/templates/_helpers.tpl b/upstreamed/nats/templates/_helpers.tpl new file mode 100644 index 0000000000..c3ed4c5ec8 --- /dev/null +++ b/upstreamed/nats/templates/_helpers.tpl @@ -0,0 +1,40 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper image name +*/}} +{{- define "nats.image" -}} +{{- $tag := .Values.image.tag | toString -}} +{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository $tag -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/upstreamed/nats/templates/client-svc.yaml b/upstreamed/nats/templates/client-svc.yaml new file mode 100644 index 0000000000..13b12ddbbf --- /dev/null +++ b/upstreamed/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.clientService.annotations }} + annotations: +{{ toYaml .Values.clientService.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.clientService.type }} + {{- if and (eq .Values.clientService.type "LoadBalancer") .Values.clientService.loadBalancerIP -}} + loadBalancerIP: {{ .Values.clientService.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.clientService.port }} + targetPort: client + name: client + {{- if and (eq .Values.clientService.type "NodePort") (not (empty .Values.clientService.nodePort)) }} + nodePort: {{ .Values.clientService.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/upstreamed/nats/templates/cluster-svc.yaml b/upstreamed/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000000..2cfc36cf59 --- /dev/null +++ b/upstreamed/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.clusterService.annotations }} + annotations: +{{ toYaml .Values.clusterService.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.clusterService.type }} + {{- if and (eq .Values.clusterService.type "LoadBalancer") .Values.clusterService.loadBalancerIP -}} + loadBalancerIP: {{ .Values.clusterService.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.clusterService.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.clusterService.type "NodePort") (not (empty .Values.clusterService.nodePort)) }} + nodePort: {{ .Values.clusterService.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/upstreamed/nats/templates/configmap.yaml b/upstreamed/nats/templates/configmap.yaml new file mode 100644 index 0000000000..c1052a6bf3 --- /dev/null +++ b/upstreamed/nats/templates/configmap.yaml @@ -0,0 +1,84 @@ +{{- $authPwd := default (randAlphaNum 10) .Values.auth.password -}} +{{- $clusterAuthPwd := default (randAlphaNum 10) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . 
}} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.clientService.port }} + http: 0.0.0.0:{{ .Values.monitoringService.port }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.clusterService.port }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.clusterService.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . 
}}-cluster:{{ .Values.clusterService.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.clusterService.port }} + {{- end }} + ] + } diff --git a/upstreamed/nats/templates/ingress.yaml b/upstreamed/nats/templates/ingress.yaml new file mode 100644 index 0000000000..eec97491e1 --- /dev/null +++ b/upstreamed/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/upstreamed/nats/templates/monitoring-svc.yaml b/upstreamed/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000000..9afeb14d95 --- /dev/null +++ b/upstreamed/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoringService.annotations }} + annotations: +{{ toYaml .Values.monitoringService.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoringService.type }} + {{- if and (eq .Values.monitoringService.type "LoadBalancer") .Values.monitoringService.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoringService.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoringService.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoringService.type "NodePort") (not (empty .Values.monitoringService.nodePort)) }} + nodePort: {{ .Values.monitoringService.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/upstreamed/nats/templates/networkpolicy.yaml b/upstreamed/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000000..e4a43dc977 --- /dev/null +++ b/upstreamed/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.clientService.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . 
}}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.clusterService.port }} + - ports: + - port: {{ .Values.monitoringService.port }} +{{- end }} diff --git a/upstreamed/nats/templates/statefulset.yaml b/upstreamed/nats/templates/statefulset.yaml new file mode 100644 index 0000000000..b0b330c98f --- /dev/null +++ b/upstreamed/nats/templates/statefulset.yaml @@ -0,0 +1,119 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - gnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + ports: + - name: client + containerPort: {{ .Values.clientService.port }} + - name: cluster + containerPort: {{ .Values.clusterService.port }} + - name: monitoring + containerPort: {{ .Values.monitoringService.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats/gnatsd.conf + subPath: gnatsd.conf + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/upstreamed/nats/templates/tls-secret.yaml b/upstreamed/nats/templates/tls-secret.yaml new file mode 100644 index 0000000000..5acf4411bf --- /dev/null +++ b/upstreamed/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/upstreamed/nats/values-production.yaml b/upstreamed/nats/values-production.yaml new file mode 100644 index 0000000000..ed4e07cb67 --- /dev/null +++ b/upstreamed/nats/values-production.yaml @@ -0,0 +1,241 @@ +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.1.0 + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## NATS replicas +replicaCount: 3 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: true + user: nats_client + # password: + # token: + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## 
+debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +maxConnections: 100 +maxControlLine: 512 +maxPayload: 65536 +writeDeadline: "2s" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +clientService: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +clusterService: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoringService: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: true + # The list of hostnames to be covered with this ingress record. 
+ # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: diff --git a/upstreamed/nats/values.yaml b/upstreamed/nats/values.yaml new file mode 100644 index 0000000000..039a068823 --- /dev/null +++ b/upstreamed/nats/values.yaml @@ -0,0 +1,245 @@ +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.1.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify 
an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: true + user: nats_client + # password: + # token: + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Network Policy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. 
When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +clientService: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +clusterService: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoringService: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create 
and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: diff --git a/upstreamed/phabricator/Chart.yaml b/upstreamed/phabricator/Chart.yaml index ca563dd6d6..14f2dcea1b 100644 --- a/upstreamed/phabricator/Chart.yaml +++ b/upstreamed/phabricator/Chart.yaml @@ -1,6 +1,6 @@ name: phabricator -version: 2.0.1 -appVersion: 2018.24.0 +version: 2.0.2 +appVersion: 2018.25.0 description: Collection of open source web applications that help software companies build better software. keywords: - phabricator diff --git a/upstreamed/phabricator/values.yaml b/upstreamed/phabricator/values.yaml index 77fd451856..bb29e478e0 100644 --- a/upstreamed/phabricator/values.yaml +++ b/upstreamed/phabricator/values.yaml @@ -4,7 +4,7 @@ image: registry: docker.io repository: bitnami/phabricator - tag: 2018.24.0 + tag: 2018.25.0 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images