# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  storageClass: ""
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: auto
## @section Common parameters
##

## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Kubernetes cluster domain name
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @param rbac.singleNamespace Restrict Argo to only deploy into a single namespace by applying Roles and RoleBindings instead of the Cluster equivalents, and start argo-cli with the --namespaced flag. Use it in clusters with strict access policy.
##
rbac:
  singleNamespace: false
## @param createAggregateRoles Create Aggregated cluster roles
##
createAggregateRoles: true
## @section Argo Workflows Server configuration parameters
##

## Argo Workflows server (based on the CLI) image
##
server:
  ## Bitnami Argo Workflow CLI image
  ## ref: https://hub.docker.com/r/bitnami/argo-workflow-cli/tags/
  ## @param server.image.registry [default: REGISTRY_NAME] server image registry
  ## @param server.image.repository [default: REPOSITORY_NAME/argo-workflow-cli] server image repository
  ## @skip server.image.tag server image tag (immutable tags are recommended)
  ## @param server.image.digest server image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag ## @param server.image.pullPolicy server image pull policy ## @param server.image.pullSecrets server image pull secrets ## image: registry: docker.io repository: bitnami/argo-workflow-cli tag: 3.5.7-debian-12-r1 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] ## @param server.enabled Enable server deployment ## enabled: true ## @param server.replicaCount Number of server replicas to deploy ## replicaCount: 1 ## Configure extra options for server containers' liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes ## @param server.livenessProbe.enabled Enable livenessProbe on server nodes ## @param server.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe ## @param server.livenessProbe.periodSeconds Period seconds for livenessProbe ## @param server.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe ## @param server.livenessProbe.failureThreshold Failure threshold for livenessProbe ## @param server.livenessProbe.successThreshold Success threshold for livenessProbe ## livenessProbe: enabled: true initialDelaySeconds: 10 periodSeconds: 20 timeoutSeconds: 1 failureThreshold: 3 successThreshold: 1 ## @param server.readinessProbe.enabled Enable readinessProbe on server nodes ## @param server.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe ## @param server.readinessProbe.periodSeconds Period seconds for readinessProbe ## 
@param server.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe ## @param server.readinessProbe.failureThreshold Failure threshold for readinessProbe ## @param server.readinessProbe.successThreshold Success threshold for readinessProbe ## readinessProbe: enabled: true initialDelaySeconds: 10 periodSeconds: 20 timeoutSeconds: 1 failureThreshold: 3 successThreshold: 1 ## @param server.startupProbe.enabled Enable startupProbe ## @param server.startupProbe.path Path to check for startupProbe ## @param server.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe ## @param server.startupProbe.periodSeconds Period seconds for startupProbe ## @param server.startupProbe.timeoutSeconds Timeout seconds for startupProbe ## @param server.startupProbe.failureThreshold Failure threshold for startupProbe ## @param server.startupProbe.successThreshold Success threshold for startupProbe ## startupProbe: enabled: false path: / initialDelaySeconds: 300 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 ## @param server.customLivenessProbe Server custom livenessProbe that overrides the default one ## customLivenessProbe: {} ## @param server.customReadinessProbe Server custom readinessProbe that overrides the default one ## customReadinessProbe: {} ## @param server.customStartupProbe Server custom startupProbe that overrides the default one ## customStartupProbe: {} ## server resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## @param server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if server.resources is set (server.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "nano" ## @param server.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: ## requests: ## cpu: 2 ## memory: 512Mi ## limits: ## cpu: 3 ## memory: 1024Mi ## resources: {} ## Configure Pods Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod ## @param server.podSecurityContext.enabled Enabled server pods' Security Context ## @param server.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy ## @param server.podSecurityContext.sysctls Set kernel settings using the sysctl interface ## @param server.podSecurityContext.supplementalGroups Set filesystem extra groups ## @param server.podSecurityContext.fsGroup Set server pod's Security Context fsGroup ## podSecurityContext: enabled: true fsGroupChangePolicy: Always sysctls: [] supplementalGroups: [] fsGroup: 1001 ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod ## @param server.containerSecurityContext.enabled Enabled server containers' Security Context ## @param server.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container ## @param server.containerSecurityContext.runAsUser Set server containers' Security Context runAsUser ## @param server.containerSecurityContext.runAsGroup Set server containers' Security Context runAsGroup ## @param server.containerSecurityContext.runAsNonRoot Set server containers' Security Context runAsNonRoot ## @param server.containerSecurityContext.readOnlyRootFilesystem Set read only root file system pod's Security Conte ## @param server.containerSecurityContext.privileged Set server container's Security Context privileged ## 
@param server.containerSecurityContext.allowPrivilegeEscalation Set server container's Security Context allowPrivilegeEscalation ## @param server.containerSecurityContext.capabilities.drop List of capabilities to be dropped ## @param server.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true seLinuxOptions: {} runAsUser: 1001 runAsGroup: 1001 runAsNonRoot: true privileged: false allowPrivilegeEscalation: false capabilities: drop: ["ALL"] seccompProfile: type: "RuntimeDefault" readOnlyRootFilesystem: true ## Create RBAC resources for the Argo workflows server ## @param server.rbac.create Create RBAC resources for the Argo workflows server ## rbac: create: true ## @param server.extraArgs Extra arguments for the server command line ## extraArgs: "" ## Enable authentication with the specified mode ## Ref: https://argoproj.github.io/argo-workflows/argo-server-auth-mode/ ## auth: ## @param server.auth.enabled Enable authentication ## enabled: true ## @param server.auth.mode Set authentication mode. Either `server`, `client` or `sso`. ## mode: client ## SSO configuration when SSO is specified as a server auth mode. ## All the values are required. SSO is activated by adding --auth-mode=sso to the server command line. ## @param server.auth.sso.enabled Enable SSO configuration for the server auth mode ## @param server.auth.sso.config.issuer Root URL for the OIDC identity provider ## @param server.auth.sso.config.clientId.name Name of the secret containing the OIDC client ID ## @param server.auth.sso.config.clientId.key Key in the secret to obtain the OIDC client ID ## @param server.auth.sso.config.clientSecret.name Name of the secret containing the OIDC client secret ## @param server.auth.sso.config.clientSecret.key Key in the secret to obtain the OIDC client secret ## @param server.auth.sso.config.redirectUrl The OIDC redirect URL. Should be in the form /oauth2/callback. 
## @param server.auth.sso.rbac.enabled Create RBAC resources for SSO ## @param server.auth.sso.rbac.secretWhitelist Restricts the secrets that the server can read ## @param server.auth.sso.scopes Scopes requested from the SSO ID provider ## sso: enabled: false ## The root URL of the OIDC identity provider. ## E.g. ## issuer: "https://accounts.google.com" ## config: issuer: "" ## Name of a secret and a key in it to retrieve the app OIDC client ID from. ## clientId: name: "" key: "" ## Name of a secret and a key in it to retrieve the app OIDC client secret from. ## clientSecret: name: "" key: "" ## The OIDC redirect URL. Should be in the form /oauth2/callback. ## redirectUrl: "" ## Create RBAC resources for SSO ## rbac: enabled: true ## When present, restricts secrets the server can read to a given list. ## You can use it to restrict the server to only be able to access the ## service account token secrets that are associated with service accounts ## used for authorization. ## secretWhitelist: [] ## Scopes requested from the SSO ID provider. The 'groups' scope requests ## group membership information, which is usually used for authorization ## decisions. ## scopes: [] ## @param server.clusterWorkflowTemplates.enabled Create ClusterRole and CRB for the controoler to access ClusterWorkflowTemplates ## @param server.clusterWorkflowTemplates.enableEditing Give the server permissions to edit ClusterWorkflowTemplates ## clusterWorkflowTemplates: # Create a ClusterRole and CRB for the controller to access ClusterWorkflowTemplates. 
enabled: true enableEditing: true ## Pod disruption budget configuration ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ ## @param server.pdb.enabled Create Pod Disruption Budget for the server component ## @param server.pdb.minAvailable Sets the min number of pods availables for the Pod Disruption Budget ## @param server.pdb.maxUnavailable Sets the max number of pods unavailable for the Pod Disruption Budget ## pdb: enabled: false minAvailable: 1 maxUnavailable: 1 ## Run the argo server in "secure" mode. ## Ref: https://argoproj.github.io/argo-workflows/tls/ ## @param server.secure Run Argo server in secure mode ## secure: false ## Base URL for client resources ## Ref: https://github.com/argoproj/argo-workflows/issues/716#issuecomment-433213190 ## @param server.baseHref Base href of the Argo Workflows deployment ## baseHref: / ## Server container port ## @param server.containerPorts.web argo Server container port ## containerPorts: web: 2746 ## Server Service Account ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ## @param server.serviceAccount.create Specifies whether a ServiceAccount should be created ## @param server.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. ## @param server.serviceAccount.automountServiceAccountToken Automount service account token for the server service account ## @param server.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
## serviceAccount: create: true name: "" automountServiceAccountToken: false annotations: {} ## @param server.command Override default container command (useful when using custom images) ## command: [] ## @param server.args Override default container args (useful when using custom images) ## args: [] ## @param server.automountServiceAccountToken Mount Service Account token in pod ## automountServiceAccountToken: true ## @param server.hostAliases server pods host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## hostAliases: [] ## @param server.podLabels Extra labels for server pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ ## podLabels: {} ## @param server.podAnnotations Annotations for server pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## @param server.podAffinityPreset Pod affinity preset. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard` ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity ## podAffinityPreset: "" ## @param server.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard` ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity ## podAntiAffinityPreset: soft ## Node server.affinity preset ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity ## nodeAffinityPreset: ## @param server.nodeAffinityPreset.type Node affinity preset type. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard` ## type: "" ## @param server.nodeAffinityPreset.key Node label key to match. Ignored if `server.affinity` is set ## key: "" ## @param server.nodeAffinityPreset.values Node label values to match. 
Ignored if `server.affinity` is set ## E.g. ## values: ## - e2e-az1 ## - e2e-az2 ## values: [] ## @param server.affinity Affinity for server pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## NOTE: `server.podAffinityPreset`, `server.podAntiAffinityPreset`, and `server.nodeAffinityPreset` will be ignored when it's set ## affinity: {} ## @param server.nodeSelector Node labels for server pods assignment ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ ## nodeSelector: {} ## @param server.tolerations Tolerations for server pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] ## @param server.updateStrategy.type server statefulset strategy type ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies ## updateStrategy: ## StrategyType ## Can be set to RollingUpdate or OnDelete ## type: RollingUpdate ## @param server.topologySpreadConstraints Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ ## ## topologySpreadConstraints: ## - maxSkew: 1 ## topologyKey: failure-domain.beta.kubernetes.io/zone ## whenUnsatisfiable: DoNotSchedule ## topologySpreadConstraints: [] ## @param server.schedulerName Alternate scheduler for the server deployment ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## schedulerName: "" ## @param server.priorityClassName server pods' priorityClassName ## priorityClassName: "" ## @param server.lifecycleHooks for the server container(s) to automate configuration before or after startup ## lifecycleHooks: {} ## @param server.extraEnvVars Array with extra environment variables to add to server nodes ## e.g: ## extraEnvVars: ## - name: FOO ## value: "bar" ## extraEnvVars: 
[] ## @param server.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for server nodes ## extraEnvVarsCM: "" ## @param server.extraEnvVarsSecret Name of existing Secret containing extra env vars for server nodes ## extraEnvVarsSecret: "" ## @param server.extraVolumes Optionally specify extra list of additional volumes for the server pod(s) ## extraVolumes: [] ## @param server.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the server container(s) ## extraVolumeMounts: [] ## @param server.sidecars Add additional sidecar containers to the server pod(s) ## e.g: ## sidecars: ## - name: your-image-name ## image: your-image ## imagePullPolicy: Always ## ports: ## - name: portname ## containerPort: 1234 ## sidecars: [] ## @param server.initContainers Add additional init containers to the server pod(s) ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ ## e.g: ## initContainers: ## - name: your-image-name ## image: your-image ## imagePullPolicy: Always ## command: ['sh', '-c', 'echo "hello world"'] ## initContainers: [] ## Server service parameters ## service: ## @param server.service.type server service type ## type: ClusterIP ## @param server.service.ports.http server service HTTP port ## ports: http: 80 ## Node ports to expose ## @param server.service.nodePorts.http Node port for HTTP ## NOTE: choose port between <30000-32767> ## nodePorts: http: "" ## @param server.service.clusterIP server service Cluster IP ## e.g.: ## clusterIP: None ## clusterIP: "" ## @param server.service.loadBalancerIP server service Load Balancer IP ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer ## loadBalancerIP: "" ## @param server.service.loadBalancerSourceRanges server service Load Balancer sources ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service ## e.g: ## loadBalancerSourceRanges: 
## - 10.10.10.0/24 ## loadBalancerSourceRanges: [] ## @param server.service.externalTrafficPolicy server service external traffic policy ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## @param server.service.annotations Additional custom annotations for server service ## annotations: {} ## @param server.service.extraPorts Extra port to expose on the server service ## extraPorts: [] ## Network Policies ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ ## networkPolicy: ## @param server.networkPolicy.enabled Specifies whether a NetworkPolicy should be created ## enabled: true ## @param server.networkPolicy.allowExternal Don't require server label for connections ## The Policy model to apply. When set to false, only pods with the correct ## server label will have network access to the ports server is listening ## on. When true, server will accept connections from any source ## (with the correct destination port). ## allowExternal: true ## @param server.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
## allowExternalEgress: true ## @param server.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) ## kubeAPIServerPorts: [443, 6443, 8443] ## @param server.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy ## e.g: ## extraIngress: ## - ports: ## - port: 1234 ## from: ## - podSelector: ## - matchLabels: ## - role: frontend ## - podSelector: ## - matchExpressions: ## - key: role ## operator: In ## values: ## - frontend extraIngress: [] ## @param server.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy (ignored if allowExternalEgress=true) ## e.g: ## extraEgress: ## - ports: ## - port: 1234 ## to: ## - podSelector: ## - matchLabels: ## - role: frontend ## - podSelector: ## - matchExpressions: ## - key: role ## operator: In ## values: ## - frontend ## extraEgress: [] ## @param server.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces ## @param server.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces ## ingressNSMatchLabels: {} ingressNSPodMatchLabels: {} ## @section Argo Workflows Controller configuration parameters ## ## Argo Workflows Controller ## controller: ## Bitnami Argo Workflow Controller image ## ref: https://hub.docker.com/r/bitnami/argo-workflow-controller/tags/ ## @param controller.image.registry [default: REGISTRY_NAME] controller image registry ## @param controller.image.repository [default: REPOSITORY_NAME/argo-workflow-controller] controller image repository ## @skip controller.image.tag controller image tag (immutable tags are recommended) ## @param controller.image.digest controller image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param controller.image.pullPolicy controller image pull policy ## @param controller.image.pullSecrets controller image pull secrets ## image: registry: docker.io repository: bitnami/argo-workflow-controller tag: 3.5.7-debian-12-r1 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] ## @param controller.replicaCount Number of controller replicas to deploy ## replicaCount: 1 ## Configure extra options for controller containers' liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes ## @param controller.livenessProbe.enabled Enable livenessProbe on controller nodes ## @param controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe ## @param controller.livenessProbe.periodSeconds Period seconds for livenessProbe ## @param controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe ## @param controller.livenessProbe.failureThreshold Failure threshold for livenessProbe ## @param controller.livenessProbe.successThreshold Success threshold for livenessProbe ## livenessProbe: enabled: true initialDelaySeconds: 90 periodSeconds: 60 timeoutSeconds: 30 failureThreshold: 3 successThreshold: 1 ## @param controller.readinessProbe.enabled Enable readinessProbe on controller nodes ## @param controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe ## @param controller.readinessProbe.periodSeconds Period seconds for 
readinessProbe ## @param controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe ## @param controller.readinessProbe.failureThreshold Failure threshold for readinessProbe ## @param controller.readinessProbe.successThreshold Success threshold for readinessProbe ## readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 failureThreshold: 3 successThreshold: 1 ## @param controller.startupProbe.enabled Enable startupProbe ## @param controller.startupProbe.path Path to check for startupProbe ## @param controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe ## @param controller.startupProbe.periodSeconds Period seconds for startupProbe ## @param controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe ## @param controller.startupProbe.failureThreshold Failure threshold for startupProbe ## @param controller.startupProbe.successThreshold Success threshold for startupProbe ## startupProbe: enabled: false path: / initialDelaySeconds: 300 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 ## @param controller.customLivenessProbe Controller custom livenessProbe that overrides the default one ## customLivenessProbe: {} ## @param controller.customReadinessProbe Controller custom readinessProbe that overrides the default one ## customReadinessProbe: {} ## @param controller.customStartupProbe Controller custom startupProbe that overrides the default one ## customStartupProbe: {} ## controller resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## @param controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "nano" ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: ## requests: ## cpu: 2 ## memory: 512Mi ## limits: ## cpu: 3 ## memory: 1024Mi ## resources: {} ## Configure Pods Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod ## @param controller.podSecurityContext.enabled Enabled controller pods' Security Context ## @param controller.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy ## @param controller.podSecurityContext.sysctls Set kernel settings using the sysctl interface ## @param controller.podSecurityContext.supplementalGroups Set filesystem extra groups ## @param controller.podSecurityContext.fsGroup Set controller pod's Security Context fsGroup ## podSecurityContext: enabled: true fsGroupChangePolicy: Always sysctls: [] supplementalGroups: [] fsGroup: 1001 ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod ## @param controller.containerSecurityContext.enabled Enabled controller containers' Security Context ## @param controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container ## @param controller.containerSecurityContext.runAsUser Set controller containers' Security Context runAsUser ## @param controller.containerSecurityContext.runAsGroup Set controller containers' Security Context runAsGroup ## @param controller.containerSecurityContext.runAsNonRoot Set controller containers' Security Context runAsNonRoot ## @param controller.containerSecurityContext.readOnlyRootFilesystem Set read only root file system pod's Security Conte ## @param 
controller.containerSecurityContext.privileged Set controller container's Security Context privileged ## @param controller.containerSecurityContext.allowPrivilegeEscalation Set controller container's Security Context allowPrivilegeEscalation ## @param controller.containerSecurityContext.capabilities.drop List of capabilities to be dropped ## @param controller.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true seLinuxOptions: {} runAsUser: 1001 runAsGroup: 1001 runAsNonRoot: true privileged: false allowPrivilegeEscalation: false capabilities: drop: ["ALL"] seccompProfile: type: "RuntimeDefault" readOnlyRootFilesystem: true ## Server container port ## @param controller.containerPorts.metrics Port to expose controller metrics ## @param controller.containerPorts.telemetry Port to expose controller telemetry ## containerPorts: metrics: 9090 telemetry: 8081 ## Create RBAC resources for the Argo workflows controller ## @param controller.rbac.create Create RBAC resources for the Argo workflows controller ## rbac: create: true ## Use an existing configmap instead of creating a configmap with the specified parameters ## @param controller.existingConfigMap ## existingConfigMap: "" ## @param controller.extraArgs Extra arguments for the controller command line ## extraArgs: "" ## ## @param controller.persistence.archive.enabled Save completed workflows to an SQL database. persistence: archive: enabled: false ## Controller configmap configuration content. Requires controller.existingConfigmap to be empty. 
## @param controller.config [object] Controller configmap configuration content ## config: | {{- if .Values.controller.instanceID.enabled }} {{- if .Values.controller.instanceID.useReleaseName }} instanceID: {{ .Release.Name }} {{- else }} instanceID: {{ .Values.controller.instanceID.explicitID }} {{- end }} {{- end }} ## How many workflows can be running at the same time ## parallelism: ## Maximun number of workflows running in a namespace ## namespaceParallelism: {{- if or .Values.executor.resources .Values.executor.extraEnvVars .Values.executor.containerSecurityContext }} executor: {{- if .Values.executor.resources }} resources: {{- include "common.tplvalues.render" (dict "value" .Values.executor.resources "context" $) | nindent 4 }} {{- else if ne .Values.executor.resourcesPreset "none" }} resources: {{- include "common.resources.preset" (dict "type" .Values.executor.resourcesPreset) | nindent 4 }} {{- end }} {{- if .Values.executor.extraEnvVars }} env: {{- include "common.tplvalues.render" (dict "value" .Values.executor.extraEnvVars "context" $) | nindent 4 }} {{- end }} {{- if .Values.executor.containerSecurityContext }} securityContext: {{- omit .Values.executor.containerSecurityContext "enabled" | toYaml | nindent 4 }} {{- end }} {{- end }} ## Uncomment to enable Artofact repository with the provided configuration ## artifactRepository: ## archiveLogs: false ## configuration: {} ## {{- if .Values.controller.metrics.enabled }} metricsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.controller.metrics "context" $) | nindent 2 }} {{- end }} {{- if .Values.controller.telemetry.enabled }} telemetryConfig: {{- include "common.tplvalues.render" (dict "value" .Values.controller.telemetry "context" $) | nindent 2 }} {{- end }} {{- if (include "argo-workflows.controller.persistence.enabled" .) 
}} persistence: connectionPool: maxIdleConns: 100 maxOpenConns: 0 nodeStatusOffLoad: false archive: {{ include "common.tplvalues.render" (dict "value" .Values.controller.persistence.archive.enabled "context" $) }} {{- if or .Values.postgresql.enabled (and .Values.externalDatabase.enabled (eq .Values.externalDatabase.type "postgresql")) }} postgresql: {{- else if or .Values.mysql.enabled (and .Values.externalDatabase.enabled (eq .Values.externalDatabase.type "mysql")) }} mysql: {{- end }} host: {{ include "argo-workflows.controller.database.host" . }} port: {{ include "argo-workflows.controller.database.port" . }} database: {{ include "argo-workflows.controller.database" . }} tableName: argo_workflows ## the database secrets must be in the same namespace of the controller ## userNameSecret: name: {{ include "argo-workflows.controller.database.username.secret" . }} key: username passwordSecret: name: {{ include "argo-workflows.controller.database.password.secret" . }} key: {{ include "argo-workflows.controller.database.password.secret.key" . }} {{- end }} {{- if .Values.controller.workflowDefaults }} workflowDefaults: {{- include "common.tplvalues.render" (dict "value" .Values.controller.workflowDefaults "context" $) | nindent 2 }} {{- end }} {{- if and .Values.server.auth.enabled .Values.server.auth.sso.enabled }} sso: {{- include "common.tplvalues.render" (dict "value" .Values.server.auth.sso.config "context" $) | nindent 2 }} {{- end }} ## Uncomment to set workflowRestrictions ## Ref: https://argoproj.github.io/argo-workflows/workflow-restrictions/ ## workflowRestrictions: {} ## Uncomment to set links ## Ref: https://argoproj.github.io/argo-workflows/links/ ## links: {} ## ## Configure the controller to accept only submissions with a matching instanceID attribute ## @param controller.instanceID.enabled Enable submission filtering based on instanceID attribute. 
Requires to set instanceID.useReleaseName or instanceID.explicitID ## @param controller.instanceID.useReleaseName Use the release name to filter submissions ## @param controller.instanceID.explicitID Filter submissions based on an explicit instance ID ## instanceID: enabled: false ## NOTE: If `instanceID.enabled` is set to `true` then either `instanceID.useReleaseName` or `instanceID.explicitID` must be defined. ## useReleaseName: false explicitID: "" ## @param controller.clusterWorkflowTemplates.enabled Whether to create a ClusterRole and Cluster Role Binding to access ClusterWorkflowTemplates resources ## clusterWorkflowTemplates: enabled: true ## Metrics configuration for the controller ## @param controller.metrics.enabled Enable controller metrics exporter ## @param controller.metrics.path Path to expose controller metrics ## @param controller.metrics.serviceMonitor.enabled Enable prometheus service monitor configuration ## metrics: enabled: false path: /metrics serviceMonitor: enabled: false ## Telemetry configuration for the controller ## @param controller.telemetry.enabled Enable telemetry for the controller ## @param controller.telemetry.path Path to expose telemetry information ## telemetry: enabled: false path: /telemetry ## Number of workflow workers to deploy ## @param controller.workflowWorkers Number of workflow workers to deploy ## workflowWorkers: 32 ## Namespaces allowed to run workflows ## @param controller.workflowNamespaces Namespaces allowed to run workflows ## workflowNamespaces: - default ## Default Workflow Values ## Ref: https://argoproj.github.io/argo-workflows/default-workflow-specs/#setting-default-workflow-values ## @param controller.workflowDefaults Default Workflow Values ## workflowDefaults: {} ## Logging level for the controller ## @param controller.logging.level Level for the controller logging ## @param controller.logging.globalLevel Global logging level for the controller ## logging: level: info globalLevel: "0" ## Pod disruption 
budget configuration ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ ## @param controller.pdb.enabled Create Pod Disruption Budget for the controller component ## @param controller.pdb.minAvailable Sets the min number of pods available for the Pod Disruption Budget ## @param controller.pdb.maxUnavailable Sets the max number of pods unavailable for the Pod Disruption Budget ## pdb: enabled: false minAvailable: 1 maxUnavailable: 1 ## Controller Service Account ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ## @param controller.serviceAccount.create Specifies whether a ServiceAccount should be created ## @param controller.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. ## @param controller.serviceAccount.automountServiceAccountToken Automount service account token for the server service account ## @param controller.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
## serviceAccount: create: true name: "" automountServiceAccountToken: false annotations: {} ## @param controller.command Override default container command (useful when using custom images) ## command: [] ## @param controller.args Override default container args (useful when using custom images) ## args: [] ## @param controller.automountServiceAccountToken Mount Service Account token in pod ## automountServiceAccountToken: true ## @param controller.hostAliases controller pods host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## hostAliases: [] ## @param controller.podLabels Extra labels for controller pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ ## podLabels: {} ## @param controller.podAnnotations Annotations for controller pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## @param controller.podAffinityPreset Pod affinity preset. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard` ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity ## podAffinityPreset: "" ## @param controller.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard` ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity ## podAntiAffinityPreset: soft ## Node controller.affinity preset ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity ## nodeAffinityPreset: ## @param controller.nodeAffinityPreset.type Node affinity preset type. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard` ## type: "" ## @param controller.nodeAffinityPreset.key Node label key to match. 
Ignored if `controller.affinity` is set ## key: "" ## @param controller.nodeAffinityPreset.values Node label values to match. Ignored if `controller.affinity` is set ## E.g. ## values: ## - e2e-az1 ## - e2e-az2 ## values: [] ## @param controller.affinity Affinity for controller pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## NOTE: `controller.podAffinityPreset`, `controller.podAntiAffinityPreset`, and `controller.nodeAffinityPreset` will be ignored when it's set ## affinity: {} ## @param controller.nodeSelector Node labels for controller pods assignment ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ ## nodeSelector: {} ## @param controller.tolerations Tolerations for controller pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] ## @param controller.updateStrategy.type controller statefulset strategy type ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies ## updateStrategy: ## StrategyType ## Can be set to RollingUpdate or OnDelete ## type: RollingUpdate ## @param controller.topologySpreadConstraints Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ ## ## topologySpreadConstraints: ## - maxSkew: 1 ## topologyKey: failure-domain.beta.kubernetes.io/zone ## whenUnsatisfiable: DoNotSchedule ## topologySpreadConstraints: [] ## @param controller.schedulerName Alternate scheduler for the server controller ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## schedulerName: "" ## @param controller.priorityClassName controller pods' priorityClassName ## priorityClassName: "" ## @param controller.lifecycleHooks for the controller container(s) to automate configuration before or 
after startup ## lifecycleHooks: {} ## @param controller.extraEnvVars Array with extra environment variables to add to controller nodes ## e.g: ## extraEnvVars: ## - name: FOO ## value: "bar" ## extraEnvVars: [] ## @param controller.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for controller nodes ## extraEnvVarsCM: "" ## @param controller.extraEnvVarsSecret Name of existing Secret containing extra env vars for controller nodes ## extraEnvVarsSecret: "" ## @param controller.extraVolumes Optionally specify extra list of additional volumes for the controller pod(s) ## extraVolumes: [] ## @param controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the controller container(s) ## extraVolumeMounts: [] ## @param controller.sidecars Add additional sidecar containers to the controller pod(s) ## e.g: ## sidecars: ## - name: your-image-name ## image: your-image ## imagePullPolicy: Always ## ports: ## - name: portname ## containerPort: 1234 ## sidecars: [] ## @param controller.initContainers Add additional init containers to the controller pod(s) ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ ## e.g: ## initContainers: ## - name: your-image-name ## image: your-image ## imagePullPolicy: Always ## command: ['sh', '-c', 'echo "hello world"'] ## initContainers: [] ## Controller service parameters ## service: ## @param controller.service.type controller service type ## type: ClusterIP ## @param controller.service.ports.metrics Metrics port for the controller ## @param controller.service.ports.telemetry Telemetry port for the controller ## ports: metrics: 8080 telemetry: 8081 ## Node ports to expose ## @param controller.service.nodePorts.metrics Node port for metrics ## @param controller.service.nodePorts.telemetry Node port for telemetry ## NOTE: choose port between <30000-32767> ## nodePorts: metrics: "" telemetry: "" ## @param controller.service.clusterIP controller service Cluster IP ## e.g.: ## 
clusterIP: None ## clusterIP: "" ## @param controller.service.loadBalancerIP controller service Load Balancer IP ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer ## loadBalancerIP: "" ## @param controller.service.loadBalancerSourceRanges controller service Load Balancer sources ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service ## e.g: ## loadBalancerSourceRanges: ## - 10.10.10.0/24 ## loadBalancerSourceRanges: [] ## @param controller.service.externalTrafficPolicy controller service external traffic policy ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## @param controller.service.annotations Additional custom annotations for controller service ## annotations: {} ## @param controller.service.extraPorts Extra port to expose on the controller service ## extraPorts: [] ## Network Policies ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ ## networkPolicy: ## @param controller.networkPolicy.enabled Specifies whether a NetworkPolicy should be created ## enabled: true ## @param controller.networkPolicy.allowExternal Don't require server label for connections ## The Policy model to apply. When set to false, only pods with the correct ## server label will have network access to the ports server is listening ## on. When true, server will accept connections from any source ## (with the correct destination port). ## allowExternal: true ## @param controller.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
## allowExternalEgress: true ## @param controller.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) ## kubeAPIServerPorts: [443, 6443, 8443] ## @param controller.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy ## e.g: ## extraIngress: ## - ports: ## - port: 1234 ## from: ## - podSelector: ## - matchLabels: ## - role: frontend ## - podSelector: ## - matchExpressions: ## - key: role ## operator: In ## values: ## - frontend extraIngress: [] ## @param controller.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true) ## e.g: ## extraEgress: ## - ports: ## - port: 1234 ## to: ## - podSelector: ## - matchLabels: ## - role: frontend ## - podSelector: ## - matchExpressions: ## - key: role ## operator: In ## values: ## - frontend ## extraEgress: [] ## @param controller.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces ## @param controller.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces ## ingressNSMatchLabels: {} ingressNSPodMatchLabels: {} ## @section Executor configuration section ## executor: ## Bitnami Argo Workflow Executor image ## ref: https://hub.docker.com/r/bitnami/argo-workflow-exec/tags/ ## @param executor.image.registry [default: REGISTRY_NAME] executor image registry ## @param executor.image.repository [default: REPOSITORY_NAME/argo-workflow-exec] executor image repository ## @skip executor.image.tag executor image tag (immutable tags are recommended) ## @param executor.image.digest executor image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param executor.image.pullPolicy executor image pull policy ## @param executor.image.pullSecrets executor image pull secrets ## image: registry: docker.io repository: bitnami/argo-workflow-exec tag: 3.5.7-debian-12-r1 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] ## Init container's resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## @param executor.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if executor.resources is set (executor.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "nano" ## @param executor.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: ## requests: ## cpu: 2 ## memory: 512Mi ## limits: ## cpu: 3 ## memory: 1024Mi ## resources: {} ## @param executor.extraEnvVars Array with extra environment variables to add to server nodes ## e.g: ## extraEnvVars: ## - name: FOO ## value: "bar" ## extraEnvVars: [] ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod ## @param executor.containerSecurityContext.enabled Enabled executor containers' Security Context ## @param executor.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container ## @param executor.containerSecurityContext.runAsUser Set executor containers' Security Context runAsUser ## @param executor.containerSecurityContext.runAsGroup Set executor containers' Security Context runAsGroup ## @param executor.containerSecurityContext.runAsNonRoot Set executor containers' Security Context runAsNonRoot ## @param executor.containerSecurityContext.readOnlyRootFilesystem Set read only root file system pod's Security Context ## @param executor.containerSecurityContext.privileged Set executor container's Security Context privileged ## @param executor.containerSecurityContext.allowPrivilegeEscalation Set executor container's Security Context allowPrivilegeEscalation ## @param executor.containerSecurityContext.capabilities.drop List of capabilities to be dropped ## @param executor.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true seLinuxOptions: {} runAsUser: 1001 runAsGroup: 1001 runAsNonRoot: true privileged: false allowPrivilegeEscalation: false 
capabilities: drop: ["ALL"] seccompProfile: type: "RuntimeDefault" readOnlyRootFilesystem: true ## @section Traffic Exposure Parameters ## ## Server ingress parameters ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ ## ingress: ## @param ingress.enabled Enable ingress record generation for server ## enabled: false ## @param ingress.pathType Ingress path type ## pathType: ImplementationSpecific ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) ## apiVersion: "" ## @param ingress.hostname Default host for the ingress record ## hostname: server.local ## @param ingress.path Default path for the ingress record ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers ## path: / ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ ## ingressClassName: "" ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md ## Use this parameter to set the required annotations for cert-manager, see ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations ## ## e.g: ## annotations: ## kubernetes.io/ingress.class: nginx ## cert-manager.io/cluster-issuer: cluster-issuer-name ## annotations: {} ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` ## You can: ## - Use the `ingress.secrets` parameter to create this TLS secret ## - Rely on cert-manager to create it by setting the corresponding annotations ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` ## tls: false ## DEPRECATED: Use ingress.annotations instead of ingress.certManager ## certManager: false ## ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm ## selfSigned: false ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record ## e.g: ## extraHosts: ## - name: server.local ## path: / ## extraHosts: [] ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host ## e.g: ## extraPaths: ## - path: /* ## backend: ## serviceName: ssl-redirect ## servicePort: use-annotation ## extraPaths: [] ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls ## e.g: ## extraTls: ## - hosts: ## - server.local ## secretName: server.local-tls ## extraTls: [] ## @param ingress.secrets Custom TLS certificates as secrets ## NOTE: 'key' and 
'certificate' are expected in PEM format ## NOTE: 'name' should line up with a 'secretName' set further up ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information ## e.g: ## secrets: ## - name: server.local-tls ## key: |- ## -----BEGIN RSA PRIVATE KEY----- ## ... ## -----END RSA PRIVATE KEY----- ## certificate: |- ## -----BEGIN CERTIFICATE----- ## ... ## -----END CERTIFICATE----- ## secrets: [] ## @param ingress.extraRules Additional rules to be covered with this ingress record ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules ## e.g: ## extraRules: ## - host: server.local ## http: ## path: / ## backend: ## service: ## name: server-svc ## port: ## name: http ## extraRules: [] ## @section Workflows configuration ## workflows: ## Service account configuration for workflows ## @param workflows.serviceAccount.create Whether to create a service account to run workflows ## @param workflows.serviceAccount.name Service account name to run workflows ## @param workflows.serviceAccount.automountServiceAccountToken Automount service account token for the workflows service account ## @param workflows.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. ## serviceAccount: create: true name: "" automountServiceAccountToken: false annotations: {} ## Create RBAC resources to run workflows. 
## A Role and Role Binding are created per namespace in controller.workflowNamespaces ## @param workflows.rbac.create Whether to create RBAC resource to run workflows ## rbac: create: true ## @section PostgreSQL subchart ## ## Postgresql subchart configuration ## @param postgresql.enabled Enable PostgreSQL subchart and controller persistence using PostgreSQL ## @param postgresql.service.ports.postgresql PostgreSQL port ## @param postgresql.auth.username PostgreSQL username ## @param postgresql.auth.database PostgreSQL database name ## @param postgresql.auth.password PostgreSQL database password ## postgresql: enabled: true service: ports: postgresql: 5432 auth: username: postgres database: bn_argo_workflows password: "" primary: ## PostgreSQL Primary resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## @param postgresql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "nano" ## @param postgresql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: ## requests: ## cpu: 2 ## memory: 512Mi ## limits: ## cpu: 3 ## memory: 1024Mi ## resources: {} ## @section MySQL subchart ## ## Mysql subchart configuration ## @param mysql.enabled Enable MySQL subchart and controller persistence using MySQL ## @param mysql.service.ports.mysql MySQL port ## @param mysql.auth.username MySQL username ## @param mysql.auth.database MySQL database name ## @param mysql.auth.password MySQL database password ## mysql: enabled: false service: ports: mysql: 3306 auth: username: mysql database: bn_argo_workflows password: "" primary: ## MySQL primary container's resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## We usually recommend not to specify default resources and to leave this as a conscious ## choice for the user. This also increases chances charts run on environments with little ## resources, such as Minikube. If you do want to specify resources, uncomment the following ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. ## @param mysql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "small" ## @param mysql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: ## requests: ## cpu: 2 ## memory: 512Mi ## limits: ## cpu: 3 ## memory: 1024Mi ## resources: {} ## @section External Database configuration ## ## External Database Configuration ## @param externalDatabase.enabled Enable using an external database and the controller to use persistence with it ## @param externalDatabase.host External Database server host ## @param externalDatabase.port External Database server port ## @param externalDatabase.username External Database username ## @param externalDatabase.password External Database user password ## @param externalDatabase.database External Database database name ## @param externalDatabase.existingSecret The name of an existing secret with database credentials ## @param externalDatabase.type Either postgresql or mysql ## externalDatabase: enabled: false ## Either "postgresql" or "mysql" ## type: "" host: localhost port: 3306 username: bn_workflows password: "" database: bitnami_workflows ## NOTE: Must contain key `database-password` ## NOTE: When it's set, the `externalDatabase.password` parameter is ignored ## existingSecret: ""