CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}

⚠ WARNING: Since August 28th, 2025, only a limited subset of images/charts are available for free.
    Subscribe to Bitnami Secure Images to receive continued support and security updates.
    More info at https://bitnami.com and https://github.com/bitnami/containers/issues/83267

** Please be patient while the chart is being deployed **

Watch the Prometheus Operator Deployment status using the command:

    kubectl get deploy -w --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/name={{ template "kube-prometheus.operator.name" . }},app.kubernetes.io/instance={{ .Release.Name }}

{{- if .Values.prometheus.enabled }}

Watch the Prometheus StatefulSet status using the command:

    kubectl get sts -w --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/name={{ template "kube-prometheus.prometheus.name" . }},app.kubernetes.io/instance={{ .Release.Name }}

Prometheus can be accessed via port "{{ .Values.prometheus.service.ports.http }}" on the following DNS name from within your cluster:

    {{ template "kube-prometheus.prometheus.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local

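To quickly check in-cluster connectivity you can run a one-off pod; a minimal sketch, assuming an image with curl is acceptable for the probe (the curlimages/curl image and pod name are illustrative, not part of this chart):

    # 'curlimages/curl' is an illustrative image; any image providing curl works.
    # /-/healthy is Prometheus' built-in health probe endpoint.
    kubectl run kube-prometheus-test --rm -i --restart=Never --namespace {{ include "common.names.namespace" . }} --image=curlimages/curl -- curl -s http://{{ template "kube-prometheus.prometheus.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local:{{ .Values.prometheus.service.ports.http }}/-/healthy
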
To access Prometheus from outside the cluster execute the following commands:

{{- if .Values.prometheus.ingress.enabled }}

You should be able to access your new Prometheus installation through

    {{ ternary "https" "http" .Values.prometheus.ingress.tls }}://{{ .Values.prometheus.ingress.hostname }}

{{- else if contains "LoadBalancer" .Values.prometheus.service.type }}

  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "kube-prometheus.prometheus.fullname" . }}'

{{- $port := .Values.prometheus.service.ports.http | toString }}

    export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "kube-prometheus.prometheus.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
    echo "Prometheus URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.prometheus.service.ports.http }}{{ end }}/"

{{- else if contains "ClusterIP" .Values.prometheus.service.type }}

    echo "Prometheus URL: http://127.0.0.1:9090/"
    kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "kube-prometheus.prometheus.fullname" . }} 9090:{{ .Values.prometheus.service.ports.http }}

{{- else if contains "NodePort" .Values.prometheus.service.type }}

    export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "kube-prometheus.prometheus.fullname" . }})
    export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
    echo "Prometheus URL: http://$NODE_IP:$NODE_PORT/"

{{- end }}

{{- if .Values.prometheus.thanos.create }}

Thanos Sidecar can be accessed via port "{{ .Values.prometheus.thanos.service.ports.grpc }}" on the following DNS name from within your cluster:

    {{ template "kube-prometheus.thanos.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local

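If you run Thanos Query (not deployed by this chart), this gRPC endpoint can be registered as a store; a hedged sketch of the Thanos Query '--store' flag, assuming Thanos Query is deployed separately in the same cluster:

    # illustrative Thanos Query argument, not a value rendered by this chart
    --store={{ template "kube-prometheus.thanos.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local:{{ .Values.prometheus.thanos.service.ports.grpc }}
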
{{- if .Values.prometheus.thanos.ingress.enabled }}

You should be able to access your new Thanos Sidecar installation through

    {{ ternary "https" "http" .Values.prometheus.thanos.ingress.tls }}://{{ .Values.prometheus.thanos.ingress.hostname }}

{{- end }}
{{- end }}
{{- end }}

{{- if .Values.alertmanager.enabled }}

Watch the Alertmanager StatefulSet status using the command:

    kubectl get sts -w --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/name={{ template "kube-prometheus.alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}

Alertmanager can be accessed via port "{{ .Values.alertmanager.service.ports.http }}" on the following DNS name from within your cluster:

    {{ template "kube-prometheus.alertmanager.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local

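As with Prometheus, you can verify in-cluster connectivity with a one-off pod; a minimal sketch (the curlimages/curl image and pod name are illustrative; /-/healthy is Alertmanager's health probe endpoint):

    # 'curlimages/curl' is an illustrative image; any image providing curl works.
    kubectl run kube-prometheus-test --rm -i --restart=Never --namespace {{ include "common.names.namespace" . }} --image=curlimages/curl -- curl -s http://{{ template "kube-prometheus.alertmanager.fullname" . }}.{{ include "common.names.namespace" . }}.svc.cluster.local:{{ .Values.alertmanager.service.ports.http }}/-/healthy
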
To access Alertmanager from outside the cluster execute the following commands:

{{- if .Values.alertmanager.ingress.enabled }}

You should be able to access your new Alertmanager installation through

    {{ ternary "https" "http" .Values.alertmanager.ingress.tls }}://{{ .Values.alertmanager.ingress.hostname }}

{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}

  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "kube-prometheus.alertmanager.fullname" . }}'

{{- $port := .Values.alertmanager.service.ports.http | toString }}

    export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "kube-prometheus.alertmanager.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
    echo "Alertmanager URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.alertmanager.service.ports.http }}{{ end }}/"

{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}

    echo "Alertmanager URL: http://127.0.0.1:9093/"
    kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "kube-prometheus.alertmanager.fullname" . }} 9093:{{ .Values.alertmanager.service.ports.http }}

{{- else if contains "NodePort" .Values.alertmanager.service.type }}

    export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "kube-prometheus.alertmanager.fullname" . }})
    export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
    echo "Alertmanager URL: http://$NODE_IP:$NODE_PORT/"

{{- end }}
{{- end }}

{{- include "common.warnings.rollingTag" .Values.operator.image }}
{{- if and .Values.operator.prometheusConfigReloader.image.registry (and .Values.operator.prometheusConfigReloader.image.repository .Values.operator.prometheusConfigReloader.image.tag) }}
{{- include "common.warnings.rollingTag" .Values.operator.prometheusConfigReloader.image }}
{{- end }}
{{- include "common.warnings.rollingTag" .Values.prometheus.image }}
{{- include "common.warnings.rollingTag" .Values.prometheus.thanos.image }}
{{- include "common.warnings.rollingTag" .Values.alertmanager.image }}
{{- include "kube-prometheus.validateValues" . }}
{{- include "common.warnings.resources" (dict "sections" (list "alertmanager" "blackboxExporter" "operator" "prometheus" "prometheus.thanos" "thanosRuler") "context" $) }}
{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.operator.image .Values.prometheus.image .Values.prometheus.thanos.image .Values.alertmanager.image .Values.blackboxExporter.image .Values.thanosRuler.image) "context" $) }}
{{- include "common.errors.insecureImages" (dict "images" (list .Values.operator.image .Values.prometheus.image .Values.prometheus.thanos.image .Values.alertmanager.image .Values.blackboxExporter.image .Values.thanosRuler.image) "context" $) }}