CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}

⚠ WARNING: Since August 28th, 2025, only a limited subset of images/charts are available for free.
Subscribe to Bitnami Secure Images to receive continued support and security updates.
More info at https://bitnami.com and https://github.com/bitnami/containers/issues/83267

** Please be patient while the chart is being deployed **

{{- if .Values.diagnosticMode.enabled }}

The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overridden with:

  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}

Get the list of pods by executing:

    kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}

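You can then open a shell in one of those pods to debug it (an illustrative example; replace <NAME OF THE POD> with a pod name from the list above):

    kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
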
{{- else }}

{{- if or (contains "LoadBalancer" .Values.service.type) (contains "NodePort" .Values.service.type) }}
{{- if not .Values.auth.enabled }}
{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }}

-------------------------------------------------------------------------------
 WARNING

    By specifying "service.type=NodePort/LoadBalancer" and "auth.enabled=false"
    you have most likely exposed the NATS service externally without any
    authentication mechanism.

    For security reasons, we strongly suggest that you switch to "ClusterIP". As
    an alternative, you can also set "auth.enabled=true" and provide a valid
    password via the "auth.password" parameter.
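
    For example, to enable authentication you could upgrade the release as
    follows (an illustrative sketch; adjust the chart reference to match your
    installation and replace <YOUR_PASSWORD>):

        helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/nats \
          --set auth.enabled=true --set auth.password=<YOUR_PASSWORD>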

-------------------------------------------------------------------------------
{{- end }}
{{- end }}
{{- end }}

NATS can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster:

    {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local

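Client applications inside the cluster can therefore use a connection URL of the following form (shown for illustration; authentication options are covered below):

    nats://{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.client }}
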
{{- if .Values.auth.enabled }}

To get the authentication credentials, run:

{{- if .Values.auth.token }}
    export NATS_TOKEN=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath='{.data.*}' | base64 -d | sed -n 's/.*token: "\([^"]*\)".*/\1/p' | head -n 1)
{{- else }}
    export NATS_USER=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath='{.data.*}' | base64 -d | sed -n 's/.*user: "\([^"]*\)".*/\1/p' | head -n 1)
    export NATS_PASS=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath='{.data.*}' | base64 -d | sed -n 's/.*password: "\([^"]*\)".*/\1/p' | head -n 1)
    echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS"
{{- end }}

{{- end }}

NATS monitoring service can be accessed via port {{ .Values.service.ports.monitoring }} on the following DNS name from within your cluster:

    {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local

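For a quick in-cluster check of the monitoring endpoint, you can hit one of the standard NATS monitoring routes such as /varz from a disposable pod (an illustrative sketch; the bitnami/os-shell image is assumed here only because it ships a curl binary):

    kubectl run nats-monitoring-check --namespace {{ .Release.Namespace }} --rm -ti --restart=Never --image=docker.io/bitnami/os-shell -- curl -s http://{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.monitoring }}/varz
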
You can create a pod to be used as a NATS client (note that the unquoted heredoc delimiter lets your shell substitute the credentials exported above):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: {{ include "common.names.fullname" . }}-client
  namespace: {{ .Release.Namespace }}
spec:
  containers:
  - name: cli
    image: docker.io/bitnami/natscli
    command: ["sleep", "infinity"]
    env:
{{- if .Values.auth.token }}
    - name: NATS_TOKEN
      value: "$NATS_TOKEN"
{{- else }}
    - name: NATS_USER
      value: "$NATS_USER"
    - name: NATS_PASS
      value: "$NATS_PASS"
{{- end }}
{{- if .Values.tls.enabled }}
    volumeMounts:
    - mountPath: /etc/certs/ca
      name: ca-cert
      readOnly: true
    - mountPath: /etc/certs/client
      name: client-cert
      readOnly: true
  volumes:
  - name: ca-cert
    secret:
      secretName: {{ template "nats.tls.ca.secretName" . }}
  - name: client-cert
    secret:
      secretName: {{ template "nats.tls.client.secretName" . }}
{{- end }}
EOF
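
Optionally, wait until the client pod is ready before attaching (a convenience sketch using kubectl wait):

    kubectl wait --namespace {{ .Release.Namespace }} --for=condition=Ready pod/{{ include "common.names.fullname" . }}-client --timeout=60s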

Then, access the pod and connect to NATS:

    kubectl exec --tty -i {{ include "common.names.fullname" . }}-client --namespace {{ .Release.Namespace }} -- bash
{{- if .Values.auth.enabled }}
{{- if .Values.auth.token }}
    nats -s nats://$NATS_TOKEN@{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.client }} {{ if .Values.tls.enabled }}--tlscert /etc/certs/client/tls.crt --tlskey /etc/certs/client/tls.key --tlsca /etc/certs/ca/tls.crt{{ end }} subscribe SomeSubject &
    nats -s nats://$NATS_TOKEN@{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.client }} {{ if .Values.tls.enabled }}--tlscert /etc/certs/client/tls.crt --tlskey /etc/certs/client/tls.key --tlsca /etc/certs/ca/tls.crt{{ end }} publish SomeSubject "Some message"
{{- else }}
    nats -s nats://$NATS_USER:$NATS_PASS@{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.client }} {{ if .Values.tls.enabled }}--tlscert /etc/certs/client/tls.crt --tlskey /etc/certs/client/tls.key --tlsca /etc/certs/ca/tls.crt{{ end }} subscribe SomeSubject &
    nats -s nats://$NATS_USER:$NATS_PASS@{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.client }} {{ if .Values.tls.enabled }}--tlscert /etc/certs/client/tls.crt --tlskey /etc/certs/client/tls.key --tlsca /etc/certs/ca/tls.crt{{ end }} publish SomeSubject "Some message"
{{- end }}
{{- else }}
    nats -s nats://{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.client }} {{ if .Values.tls.enabled }}--tlscert /etc/certs/client/tls.crt --tlskey /etc/certs/client/tls.key --tlsca /etc/certs/ca/tls.crt{{ end }} subscribe SomeSubject &
    nats -s nats://{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ports.client }} {{ if .Values.tls.enabled }}--tlscert /etc/certs/client/tls.crt --tlskey /etc/certs/client/tls.key --tlsca /etc/certs/ca/tls.crt{{ end }} publish SomeSubject "Some message"
{{- end }}

Note that the subscribe command is sent to the background with "&" so that the publish command can be run in the same shell.

To access the Monitoring service from outside the cluster, follow the steps below:

{{- if .Values.ingress.enabled }}

1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP:

    export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
    echo "Monitoring URL: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}"
    echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts

{{- else }}

1. Get the NATS monitoring URL by running:

{{- if contains "NodePort" .Values.service.type }}

    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }})
    echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/"

{{- else if contains "LoadBalancer" .Values.service.type }}

  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}'

    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
    echo "Monitoring URL: http://$SERVICE_IP/"

{{- else if contains "ClusterIP" .Values.service.type }}

    echo "Monitoring URL: http://127.0.0.1:{{ .Values.service.ports.monitoring }}"
    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.monitoring }}:{{ .Values.service.ports.monitoring }}

{{- end }}
{{- end }}

2. Open a browser and access the NATS monitoring by browsing to the Monitoring URL obtained above.
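
   Alternatively, you can query the monitoring API from the command line (an illustrative sketch; replace <MONITORING_URL> with the URL obtained above, and note that /varz is one of the standard NATS monitoring endpoints):

    curl -s <MONITORING_URL>/varz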
{{- if .Values.metrics.enabled }}

3. Get the NATS Prometheus Metrics URL by running:

    echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.service.port }}/metrics"
    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} {{ .Values.metrics.service.port }}:{{ .Values.metrics.service.port }}

4. Access the NATS Prometheus metrics by opening the obtained URL in a browser.

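   For example, while the port-forward above is running, you can also fetch the metrics from another terminal (a quick sketch):

    curl -s http://127.0.0.1:{{ .Values.metrics.service.port }}/metrics | head -n 20
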
{{- end }}
{{- end }}

{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.metrics.image }}
{{- include "common.warnings.resources" (dict "sections" (list "metrics" "") "context" $) }}
{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.metrics.image) "context" $) }}
{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.metrics.image) "context" $) }}
{{- include "nats.validateValues" . }}