kubeapps: bump chart version to 3.7.0

This commit is contained in:
kubernetes-bitnami
2020-06-04 15:02:12 +00:00
parent dc4ad223c6
commit c0942755d2
16 changed files with 190 additions and 36 deletions

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: kubeapps
version: 3.6.0
appVersion: v1.10.0
version: 3.7.0
appVersion: v1.10.1
description: Kubeapps is a dashboard for your Kubernetes cluster that makes it easy to deploy and manage applications in your cluster using Helm
icon: https://raw.githubusercontent.com/kubeapps/kubeapps/master/docs/img/logo.png
keywords:

View File

@@ -9,7 +9,8 @@
- Add custom and private chart repositories (supports [ChartMuseum](https://github.com/helm/chartmuseum) and [JFrog Artifactory](https://www.jfrog.com/confluence/display/RTF/Helm+Chart+Repositories))
- Browse and provision external services from the [Service Catalog](https://github.com/kubernetes-incubator/service-catalog) and available Service Brokers
- Connect Helm-based applications to external services with Service Catalog Bindings
- Secure authentication and authorization based on Kubernetes [Role-Based Access Control](https://github.com/kubeapps/kubeapps/blob/master/docs/user/access-control.md)
- Secure authentication to Kubeapps using an [OAuth2/OIDC provider](https://github.com/kubeapps/kubeapps/blob/master/docs/user/using-an-OIDC-provider.md)
- Secure authorization based on Kubernetes [Role-Based Access Control](https://github.com/kubeapps/kubeapps/blob/master/docs/user/access-control.md)
## TL;DR;
@@ -117,7 +118,7 @@ Learn more about how to secure your Kubeapps installation [here](https://github.
### Exposing Externally
> **Note**: The Kubeapps frontend sets up a proxy to the Kubernetes API service, so when when exposing the Kubeapps service to a network external to the Kubernetes cluster (perhaps on an internal or public network), the Kubernetes API will also be exposed on that network. See [#1111](https://github.com/kubeapps/kubeapps/issues/1111) for more details.
> **Note**: The Kubeapps frontend sets up a proxy to the Kubernetes API service which means that when exposing the Kubeapps service to a network external to the Kubernetes cluster (perhaps on an internal or public network), the Kubernetes API will also be exposed for authenticated requests from that network. If you explicitly [use an OAuth2/OIDC provider with Kubeapps](https://github.com/kubeapps/kubeapps/blob/master/docs/user/using-an-OIDC-provider.md) (recommended), then only the configured users trusted by your Identity Provider will be able to reach the Kubernetes API. See [#1111](https://github.com/kubeapps/kubeapps/issues/1111) for more details.
#### LoadBalancer Service

View File

@@ -100,8 +100,8 @@ Create name for the apprepository-controller based on the fullname
{{/*
Create name for the apprepository pre-upgrade job
*/}}
{{- define "kubeapps.apprepository-jobs-preupgrade.fullname" -}}
{{ template "kubeapps.fullname" . }}-internal-apprepository-jobs-preupgrade
{{- define "kubeapps.apprepository-job-postupgrade.fullname" -}}
{{ template "kubeapps.fullname" . }}-internal-apprepository-job-postupgrade
{{- end -}}
{{/*

View File

@@ -5,7 +5,6 @@ metadata:
name: {{ .name }}
annotations:
"helm.sh/hook": post-install
"helm.sh/hook-weight": "10"
labels:
app: {{ template "kubeapps.apprepository.fullname" $ }}
chart: {{ template "kubeapps.chart" $ }}

View File

@@ -38,6 +38,7 @@ spec:
containers:
- name: controller
image: {{ template "kubeapps.image" (list .Values.apprepository.image .Values.global) }}
imagePullPolicy: {{ .Values.apprepository.image.pullPolicy | quote }}
command:
- /apprepository-controller
args:

View File

@@ -38,6 +38,7 @@ spec:
containers:
- name: kubectl
image: {{ template "kubeapps.image" (list .Values.hooks.image .Values.global) }}
imagePullPolicy: {{ .Values.hooks.image.pullPolicy | quote }}
command:
- /bin/sh
args:

View File

@@ -0,0 +1,40 @@
{{- if .Values.rbac.create -}}
# Helm 3.1 supports a lookup template tag to create a secret if it does not exist
# but we can't yet restrict to helm 3.1, hence manually doing this with an initContainer
# in the post-upgrade job.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
labels:
app: {{ template "kubeapps.apprepository.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
labels:
app: {{ template "kubeapps.apprepository.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -1,15 +1,15 @@
{{- if .Values.featureFlags.invalidateCache }}
# Ensure db indexes are set and invalidate the chart cache during both install and upgrade.
# Ensure db indexes are set and invalidate the chart cache when upgrading.
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "kubeapps.apprepository-jobs-preupgrade.fullname" . }}
name: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
annotations:
helm.sh/hook: pre-upgrade,post-install
helm.sh/hook: post-upgrade
helm.sh/hook-weight: "0"
helm.sh/hook-delete-policy: hook-succeeded
labels:
app: {{ template "kubeapps.apprepository-jobs-preupgrade.fullname" . }}
app: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
@@ -17,7 +17,7 @@ spec:
template:
metadata:
labels:
app: {{ template "kubeapps.apprepository-jobs-preupgrade.fullname" . }}
app: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
release: {{ .Release.Name }}
spec:
{{- include "kubeapps.imagePullSecrets" . | indent 6 }}
@@ -36,9 +36,11 @@ spec:
runAsUser: {{ .Values.securityContext.runAsUser }}
{{- end }}
restartPolicy: OnFailure
serviceAccountName: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
containers:
- name: invalidate-cache
image: {{ template "kubeapps.image" (list .Values.apprepository.syncImage .Values.global) }}
imagePullPolicy: {{ .Values.apprepository.syncImage.pullPolicy | quote }}
command:
- /asset-syncer
args:
@@ -67,4 +69,17 @@ spec:
key: postgresql-password
name: {{ .Values.postgresql.existingSecret }}
{{- end }}
{{- if and .Values.postgresql.enabled .Values.postgresql.existingSecret }}
initContainers:
# Helm 3.1 supports a lookup template tag to create a secret if it does not exist
# but we can't yet restrict to helm 3.1, hence manually doing this with an initContainer.
- name: ensure-postgres-password
image: {{ template "kubeapps.image" (list .Values.hooks.image .Values.global) }}
imagePullPolicy: {{ .Values.hooks.image.pullPolicy | quote }}
command:
- /bin/bash
args:
- -c
- 'kubectl -n {{ .Release.Namespace }} get secret {{ .Values.postgresql.existingSecret }} || kubectl -n {{ .Release.Namespace }} create secret generic {{ .Values.postgresql.existingSecret }} --from-literal=postgresql-password={{ randAlphaNum 10 | quote }} --from-literal=postgresql-replication-password={{ randAlphaNum 10 | quote }}'
{{- end }}
{{- end }}

View File

@@ -7,3 +7,13 @@ metadata:
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kubeapps.apprepository-job-postupgrade.fullname" . }}
labels:
app: {{ template "kubeapps.apprepository.fullname" . }}
chart: {{ template "kubeapps.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}

View File

@@ -37,6 +37,7 @@ spec:
containers:
- name: assetsvc
image: {{ template "kubeapps.image" (list .Values.assetsvc.image .Values.global) }}
imagePullPolicy: {{ .Values.assetsvc.image.pullPolicy | quote }}
command:
- /assetsvc
{{- if .Values.mongodb.enabled }}

View File

@@ -39,6 +39,7 @@ spec:
containers:
- name: dashboard
image: {{ template "kubeapps.image" (list .Values.dashboard.image .Values.global) }}
imagePullPolicy: {{ .Values.dashboard.image.pullPolicy | quote }}
{{- if .Values.dashboard.livenessProbe }}
livenessProbe: {{- toYaml .Values.dashboard.livenessProbe | nindent 12 }}
{{- end }}

View File

@@ -37,6 +37,7 @@ spec:
containers:
- name: kubectl
image: {{ template "kubeapps.image" (list .Values.hooks.image .Values.global) }}
imagePullPolicy: {{ .Values.hooks.image.pullPolicy | quote }}
command:
- /bin/sh
args:

View File

@@ -8,6 +8,28 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
k8s-api-proxy.conf: |-
# Disable buffering for log streaming
proxy_buffering off;
# Hide Www-Authenticate to prevent it triggering a basic auth prompt in
# the browser with some clusters
proxy_hide_header Www-Authenticate;
# Keep the connection open with the API server even if idle (the default is 60 seconds)
# Setting it to 1 hour which should be enough for our current use case of deploying/upgrading apps
# If we enable other use-cases in the future we might need to bump this value
# More info here https://github.com/kubeapps/kubeapps/issues/766
proxy_read_timeout 1h;
{{- if .Values.frontend.proxypassAccessTokenAsBearer }}
# Google Kubernetes Engine requires the access_token as the Bearer when talking to the k8s api server.
proxy_set_header Authorization "Bearer $http_x_forwarded_access_token";
{{- end }}
{{- range .Values.featureFlags.additionalClusters }}
{{- if .certificateAuthorityData }}
{{ .name }}-ca.pem: {{ .certificateAuthorityData }}
{{- end }}
{{- end}}
vhost.conf: |-
# Retain the default nginx handling of requests without a "Connection" header
map $http_upgrade $connection_upgrade {
@@ -29,28 +51,38 @@ data:
return 200 "healthy\n";
}
# The default cluster running on the same cluster as Kubeapps.
location ~* /api/clusters/default {
rewrite /api/clusters/default/(.*) /$1 break;
rewrite /api/clusters/default / break;
proxy_pass https://kubernetes.default;
include "./server_blocks/k8s-api-proxy.conf";
}
# Ensure each additional cluster can be reached (should only be
# used with an auth-proxy where k8s credentials never leave
# the cluster). See additionalClusters option.
{{- range .Values.featureFlags.additionalClusters }}
location ~* /api/clusters/{{ .name }} {
rewrite /api/clusters/{{ .name }}/(.*) /$1 break;
rewrite /api/clusters/{{ .name }} / break;
proxy_pass {{ .apiServiceURL }};
{{- if .certificateAuthorityData }}
proxy_ssl_trusted_certificate "./server_blocks/{{ .name }}-ca.pem";
{{- end }}
include "./server_blocks/k8s-api-proxy.conf";
}
{{- end }}
# TODO: The following location is left for backwards compat but will no longer
# be needed once clients are sending the cluster name.
# Using regexp match instead of prefix one because the application can be
# deployed under a specific path i.e /kubeapps
location ~* /api/kube {
rewrite /api/kube/(.*) /$1 break;
rewrite /api/kube / break;
proxy_pass https://kubernetes.default;
# Disable buffering for log streaming
proxy_buffering off;
# Hide Www-Authenticate to prevent it triggering a basic auth prompt in
# the browser with some clusters
proxy_hide_header Www-Authenticate;
# Keep the connection open with the API server even if idle (the default is 60 seconds)
# Setting it to 1 hour which should be enough for our current use case of deploying/upgrading apps
# If we enable other use-cases in the future we might need to bump this value
# More info here https://github.com/kubeapps/kubeapps/issues/766
proxy_read_timeout 1h;
{{- if .Values.frontend.proxypassAccessTokenAsBearer }}
# Google Kubernetes Engine requires the access_token as the Bearer when talking to the k8s api server.
proxy_set_header Authorization "Bearer $http_x_forwarded_access_token";
{{- end }}
include "./server_blocks/k8s-api-proxy.conf";
}
location ~* /api/assetsvc {

View File

@@ -39,6 +39,7 @@ spec:
containers:
- name: nginx
image: {{ template "kubeapps.image" (list .Values.frontend.image .Values.global) }}
imagePullPolicy: {{ .Values.frontend.image.pullPolicy | quote }}
{{- if .Values.frontend.livenessProbe }}
livenessProbe: {{- toYaml .Values.frontend.livenessProbe | nindent 12 }}
{{- end }}
@@ -75,6 +76,7 @@ spec:
- {{ . }}
{{- end }}
image: {{ template "kubeapps.image" (list .Values.authProxy.image .Values.global) }}
imagePullPolicy: {{ .Values.authProxy.image.pullPolicy | quote }}
ports:
- name: proxy
containerPort: 3000

View File

@@ -42,6 +42,7 @@ spec:
containers:
- name: kubeops
image: {{ template "kubeapps.image" (list .Values.kubeops.image .Values.global) }}
imagePullPolicy: {{ .Values.kubeops.image.pullPolicy | quote }}
command:
- /kubeops
args:

View File

@@ -106,6 +106,11 @@ frontend:
registry: docker.io
repository: bitnami/nginx
tag: 1.17.10-debian-10-r10
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Frontend service parameters
##
service:
@@ -188,7 +193,11 @@ apprepository:
image:
registry: docker.io
repository: bitnami/kubeapps-apprepository-controller
tag: 1.10.0-scratch-r0
tag: 1.10.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Kubeapps assets synchronization tool
## Image used to perform chart repository syncs
## ref: https://hub.docker.com/r/bitnami/kubeapps-asset-syncer/tags/
@@ -196,7 +205,12 @@ apprepository:
syncImage:
registry: docker.io
repository: bitnami/kubeapps-asset-syncer
tag: 1.10.0-scratch-r0
tag: 1.10.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Initial charts repo proxies to configure
##
initialReposProxy:
@@ -264,6 +278,11 @@ hooks:
registry: docker.io
repository: bitnami/kubectl
tag: 1.16.3-debian-10-r85
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Affinity for hooks' pods assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
@@ -284,7 +303,12 @@ kubeops:
image:
registry: docker.io
repository: bitnami/kubeapps-kubeops
tag: 1.10.0-scratch-r0
tag: 1.10.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
service:
port: 8080
resources:
@@ -326,7 +350,11 @@ tillerProxy:
image:
registry: docker.io
repository: bitnami/kubeapps-tiller-proxy
tag: 1.10.0-scratch-r0
tag: 1.10.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Tiller Proxy service parameters
##
@@ -400,7 +428,12 @@ assetsvc:
image:
registry: docker.io
repository: bitnami/kubeapps-assetsvc
tag: 1.10.0-scratch-r0
tag: 1.10.1-scratch-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Assetsvc service parameters
##
service:
@@ -460,7 +493,12 @@ dashboard:
image:
registry: docker.io
repository: bitnami/kubeapps-dashboard
tag: 1.10.0-debian-10-r0
tag: 1.10.1-debian-10-r0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Dashboard service parameters
##
service:
@@ -513,7 +551,7 @@ dashboard:
##
mongodb:
## Whether to deploy a mongodb server to satisfy the applications database requirements.
enabled: true
enabled: false
## Kubeapps uses MongoDB as a cache and persistence is not required
##
persistence:
@@ -546,7 +584,7 @@ mongodb:
##
postgresql:
## Whether to deploy a postgresql server to satisfy the applications database requirements.
enabled: false
enabled: true
## Enable replication for high availability
replication:
enabled: true
@@ -607,6 +645,11 @@ authProxy:
registry: docker.io
repository: bitnami/oauth2-proxy
tag: 5.1.0-debian-10-r24
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Mandatory parameters
##
provider: ""
@@ -651,3 +694,9 @@ featureFlags:
reposPerNamespace: true
invalidateCache: true
operators: false
# additionalClusters is a WIP feature for multi-cluster support.
additionalClusters: []
# additionalClusters:
# - name: second-cluster
# apiServiceURL: https://second-cluster:6443
# certificateAuthorityData: LS0tLS1CRUdJ...