Files
charts/bitnami/deepspeed/values.yaml
Fran Mulero c5dd038b10 [bitnami/deepspeed] Enable PodDisruptionBudgets (#26424)
* [bitnami/deepspeed] Enable PodDisruptionBudgets

Signed-off-by: Fran Mulero <fmulero@vmware.com>

* Update CHANGELOG.md

Signed-off-by: Bitnami Containers <bitnami-bot@vmware.com>

---------

Signed-off-by: Fran Mulero <fmulero@vmware.com>
Signed-off-by: Bitnami Containers <bitnami-bot@vmware.com>
Co-authored-by: Bitnami Containers <bitnami-bot@vmware.com>
2024-05-30 07:45:37 +00:00

1097 lines
47 KiB
YAML

## Copyright Broadcom, Inc. All Rights Reserved.
## SPDX-License-Identifier: APACHE-2.0
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
## Compatibility adaptations for Kubernetes platforms
##
compatibility:
## Compatibility adaptations for Openshift
##
openshift:
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
##
adaptSecurityContext: auto
## @section Common parameters
##
## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Kubernetes cluster domain name
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## Enable diagnostic mode in the deployments/statefulsets
##
diagnosticMode:
## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
##
enabled: false
## @param diagnosticMode.command Command to override all containers in the deployments/statefulsets
##
command:
- sleep
## @param diagnosticMode.args Args to override all containers in the deployments/statefulsets
##
args:
- infinity
## @section Source code parameters
## Bitnami Deepspeed image version
## ref: https://hub.docker.com/r/bitnami/deepspeed/tags/
## @param image.registry [default: REGISTRY_NAME] Deepspeed image registry
## @param image.repository [default: REPOSITORY_NAME/deepspeed] Deepspeed image repository
## @skip image.tag Deepspeed image tag (immutable tags are recommended)
## @param image.digest Deepspeed image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy Deepspeed image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/deepspeed
tag: 0.14.2-debian-12-r3
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Source code parameters
##
source:
## @param source.type Where the source comes from: Possible values: configmap, git, custom
##
type: "configmap"
## @param source.launchCommand deepspeed command to run over the project
##
launchCommand: ""
## @param source.configMap List of files of the project
##
configMap: {}
## @param source.existingConfigMap Name of a configmap containing the files of the project
##
existingConfigMap: ""
git:
## @param source.git.repository Repository that holds the files
##
repository: ""
## @param source.git.revision Revision from the repository to checkout
##
revision: ""
## @param source.git.extraVolumeMounts Add extra volume mounts for the Git container
## Useful to mount keys to connect through ssh. (normally used with extraVolumes)
## E.g:
## extraVolumeMounts:
## - name: ssh-dir
## mountPath: /.ssh/
##
extraVolumeMounts: []
## Configuration parameters
##
config:
## @param config.defaultHostFile [string] Host file generated by default (only edit if you know what you are doing)
##
defaultHostFile: |
{{- $workers := $.Values.worker.replicaCount | int }}
{{- range $i, $e := until $workers }}
{{ include "deepspeed.v0.worker.fullname" $ }}-{{ $i }}.{{ printf "%s-headless" (include "deepspeed.v0.worker.fullname" $) }} slots={{ $.Values.worker.slotsPerNode }}
{{- end }}
## @param config.overrideHostFile Override default host file with the content in this value
##
overrideHostFile: ""
## @param config.existingHostFileConfigMap Name of a ConfigMap containing the hostfile
##
existingHostFileConfigMap: ""
## @param config.defaultSSHClient [string] Default SSH client configuration for the client node (only edit if you know what you are doing)
##
defaultSSHClient: |
{{- $workers := $.Values.worker.replicaCount | int }}
{{- range $i, $e := until $workers }}
Host {{ include "deepspeed.v0.worker.fullname" $ }}-{{ $i }}.{{ printf "%s-headless" (include "deepspeed.v0.worker.fullname" $) }}
Port {{ $.Values.worker.containerPorts.ssh }}
IdentityFile /bitnami/ssh/client-private-key/id_rsa
StrictHostKeyChecking no
{{- end }}
## @param config.overrideSSHClient Override default SSH client configuration with the content in this value
##
overrideSSHClient: ""
## @param config.existingSSHClientConfigMap Name of a ConfigMap containing the SSH client configuration
##
existingSSHClientConfigMap: ""
## @param config.defaultSSHServer [string] Default SSH Server configuration for the worker nodes (only edit if you know what you are doing)
##
defaultSSHServer: |
Port {{ .Values.worker.containerPorts.ssh }}
PasswordAuthentication no
UsePAM no
PermitUserEnvironment yes
## @param config.overrideSSHServer Override SSH Server configuration with the content in this value
##
overrideSSHServer: ""
## @param config.existingSSHServerConfigMap Name of a ConfigMap with the SSH Server configuration
##
existingSSHServerConfigMap: ""
## @param config.sshPrivateKey Private key for the client node to connect to the worker nodes
##
sshPrivateKey: ""
## @param config.existingSSHKeySecret Name of a secret containing the ssh private key
##
existingSSHKeySecret: ""
## @section Client Deployment Parameters
##
client:
## @param client.enabled Enable Client deployment
##
enabled: true
## @param client.useJob Deploy as job
##
useJob: false
## @param client.backoffLimit set backoff limit of the job
##
backoffLimit: 10
## @param client.extraEnvVars Array with extra environment variables to add to client nodes
## e.g:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param client.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for client nodes
##
extraEnvVarsCM: ""
## @param client.extraEnvVarsSecret Name of existing Secret containing extra env vars for client nodes
##
extraEnvVarsSecret: ""
## @param client.annotations Annotations for the client deployment
##
annotations: {}
## @param client.command Override default container command (useful when using custom images)
##
command: []
## @param client.args Override default container args (useful when using custom images)
##
args: []
## @param client.terminationGracePeriodSeconds Client termination grace period (in seconds)
##
terminationGracePeriodSeconds: ""
## Configure extra options for Client containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param client.livenessProbe.enabled Enable livenessProbe on Client nodes
## @param client.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param client.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param client.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param client.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param client.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 20
failureThreshold: 5
successThreshold: 1
## @param client.readinessProbe.enabled Enable readinessProbe on Client nodes
## @param client.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param client.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param client.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param client.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param client.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 30
failureThreshold: 5
successThreshold: 1
## @param client.startupProbe.enabled Enable startupProbe on Client containers
## @param client.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param client.startupProbe.periodSeconds Period seconds for startupProbe
## @param client.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param client.startupProbe.failureThreshold Failure threshold for startupProbe
## @param client.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 5
successThreshold: 1
## @param client.customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param client.customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param client.customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## client resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param client.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if client.resources is set (client.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "small"
## @param client.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param client.podSecurityContext.enabled Enabled Client pods' Security Context
## @param client.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param client.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param client.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param client.podSecurityContext.fsGroup Set Client pod's Security Context fsGroup
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param client.containerSecurityContext.enabled Enabled Client containers' Security Context
## @param client.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param client.containerSecurityContext.runAsUser Set Client containers' Security Context runAsUser
## @param client.containerSecurityContext.runAsGroup Set Client containers' Security Context runAsGroup
## @param client.containerSecurityContext.runAsNonRoot Set Client containers' Security Context runAsNonRoot
## @param client.containerSecurityContext.readOnlyRootFilesystem Set Client containers' Security Context readOnlyRootFilesystem
## @param client.containerSecurityContext.privileged Set Client containers' Security Context privileged
## @param client.containerSecurityContext.allowPrivilegeEscalation Set Client container's privilege escalation
## @param client.containerSecurityContext.capabilities.drop Set Client container's Security Context capabilities to drop
## @param client.containerSecurityContext.seccompProfile.type Set Client container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param client.lifecycleHooks for the client container(s) to automate configuration before or after startup
##
lifecycleHooks: {}
## @param client.runtimeClassName Name of the runtime class to be used by pod(s)
## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
## https://github.com/microsoft/DeepSpeedExamples
runtimeClassName: ""
## @param client.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param client.hostAliases client pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param client.labels Extra labels for the client deployment
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
labels: {}
## @param client.podLabels Extra labels for client pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param client.podAnnotations Annotations for client pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param client.podAffinityPreset Pod affinity preset. Ignored if `client.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param client.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `client.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node client.affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param client.nodeAffinityPreset.type Node affinity preset type. Ignored if `client.affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param client.nodeAffinityPreset.key Node label key to match. Ignored if `client.affinity` is set
##
key: ""
## @param client.nodeAffinityPreset.values Node label values to match. Ignored if `client.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param client.affinity Affinity for Client pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `client.podAffinityPreset`, `client.podAntiAffinityPreset`, and `client.nodeAffinityPreset` will be ignored when it's set
##
affinity: {}
## @param client.nodeSelector Node labels for Client pods assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param client.tolerations Tolerations for Client pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param client.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
##
topologySpreadConstraints: []
## @param client.priorityClassName Client pods' priorityClassName
##
priorityClassName: ""
## @param client.schedulerName Kubernetes pod scheduler registry
## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param client.updateStrategy.type Client statefulset strategy type
## @param client.updateStrategy.rollingUpdate Client statefulset rolling update configuration parameters
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
rollingUpdate: {}
## @param client.extraVolumes Optionally specify extra list of additional volumes for the Client pod(s)
##
extraVolumes: []
## @param client.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Client container(s)
##
extraVolumeMounts: []
## @param client.sidecars Add additional sidecar containers to the Client pod(s)
## e.g:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param client.enableDefaultInitContainers Deploy default init containers
##
enableDefaultInitContainers: true
## @param client.initContainers Add additional init containers to the Client pod(s)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
## e.g:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## command: ['sh', '-c', 'echo "hello world"']
##
initContainers: []
## Network Policy configuration
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param client.networkPolicy.enabled Enable creation of NetworkPolicy resources
##
enabled: true
## @param client.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
##
allowExternalEgress: true
## @param client.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraIngress: []
## @param client.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## Service account for Client to use
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
## @param client.serviceAccount.create Enable creation of ServiceAccount for Client pods
##
create: true
## @param client.serviceAccount.name The name of the ServiceAccount to use
## If not set and create is true, a name is generated using the common.names.fullname template
##
name: ""
## @param client.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
## Can be set to false if pods using this serviceAccount do not need to use K8s API
##
automountServiceAccountToken: false
## @param client.serviceAccount.annotations Additional custom annotations for the ServiceAccount
##
annotations: {}
## @section Deepspeed Client persistence parameters
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
##
persistence:
## @param client.persistence.enabled Use a PVC to persist data
##
enabled: false
## @param client.persistence.storageClass Client data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param client.persistence.existingClaim Use a existing PVC which must be created manually before bound
##
existingClaim: ""
## @param client.persistence.mountPath Path to mount the volume at
##
mountPath: /bitnami/deepspeed/data
## @param client.persistence.accessModes Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## @param client.persistence.dataSource Custom PVC data source
##
dataSource: {}
## @param client.persistence.selector Selector to match an existing Persistent Volume for the client data PVC
## If set, the PVC can't have a PV dynamically provisioned for it
## E.g.
## selector:
## matchLabels:
## app: my-app
##
selector: {}
## @param client.persistence.size Size of data volume
##
size: 8Gi
## @param client.persistence.labels Persistent Volume labels
##
labels: {}
## @param client.persistence.annotations Persistent Volume annotations
##
annotations: {}
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param client.pdb.create Enable/disable a Pod Disruption Budget creation
## @param client.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param client.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `client.pdb.minAvailable` and `client.pdb.maxUnavailable` are empty.
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## @section Worker Deployment Parameters
##
worker:
## @param worker.enabled Enable Worker deployment
##
enabled: true
## @param worker.slotsPerNode Number of slots available per worker node
##
slotsPerNode: 1
## @param worker.extraEnvVars Array with extra environment variables to add to worker nodes
## e.g:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param worker.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for worker nodes
##
extraEnvVarsCM: ""
## @param worker.extraEnvVarsSecret Name of existing Secret containing extra env vars for worker nodes
##
extraEnvVarsSecret: ""
## @param worker.command Override default container command (useful when using custom images)
##
command: []
## @param worker.args Override default container args (useful when using custom images)
##
args: []
## @param worker.replicaCount Number of Worker replicas to deploy
##
replicaCount: 3
## @param worker.terminationGracePeriodSeconds Worker termination grace period (in seconds)
##
terminationGracePeriodSeconds: ""
## @param worker.containerPorts.ssh SSH port for Worker
##
containerPorts:
ssh: 2222
## Configure extra options for Worker containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param worker.livenessProbe.enabled Enable livenessProbe on Worker nodes
## @param worker.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param worker.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param worker.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param worker.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param worker.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 5
successThreshold: 1
## @param worker.readinessProbe.enabled Enable readinessProbe on Worker nodes
## @param worker.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param worker.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param worker.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param worker.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param worker.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 5
successThreshold: 1
## @param worker.startupProbe.enabled Enable startupProbe on Worker containers
## @param worker.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param worker.startupProbe.periodSeconds Period seconds for startupProbe
## @param worker.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param worker.startupProbe.failureThreshold Failure threshold for startupProbe
## @param worker.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 5
successThreshold: 1
## @param worker.customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param worker.customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param worker.customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## worker resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param worker.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if worker.resources is set (worker.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "small"
## @param worker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param worker.podSecurityContext.enabled Enabled Worker pods' Security Context
## @param worker.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param worker.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param worker.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param worker.podSecurityContext.fsGroup Set Worker pod's Security Context fsGroup
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param worker.containerSecurityContext.enabled Enabled Worker containers' Security Context
## @param worker.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param worker.containerSecurityContext.runAsUser Set Worker containers' Security Context runAsUser
## @param worker.containerSecurityContext.runAsGroup Set Worker containers' Security Context runAsGroup
## @param worker.containerSecurityContext.runAsNonRoot Set Worker containers' Security Context runAsNonRoot
## @param worker.containerSecurityContext.readOnlyRootFilesystem Set Worker containers' Security Context readOnlyRootFilesystem
## @param worker.containerSecurityContext.allowPrivilegeEscalation Set Worker container's privilege escalation
## @param worker.containerSecurityContext.capabilities.drop Set Worker container's Security Context capabilities to drop
## @param worker.containerSecurityContext.seccompProfile.type Set Worker container's Security Context seccomp profile
## @param worker.containerSecurityContext.privileged Set Worker container's Security Context privileged
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param worker.lifecycleHooks for the worker container(s) to automate configuration before or after startup
##
lifecycleHooks: {}
## @param worker.runtimeClassName Name of the runtime class to be used by pod(s)
## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
##
runtimeClassName: ""
## @param worker.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param worker.hostAliases worker pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param worker.labels Labels for the worker deployment
##
labels: {}
## @param worker.annotations Annotations for the worker deployment
##
annotations: {}
## @param worker.podLabels Extra labels for worker pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param worker.podAnnotations Annotations for worker pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param worker.podAffinityPreset Pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param worker.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node worker.affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param worker.nodeAffinityPreset.type Node affinity preset type. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param worker.nodeAffinityPreset.key Node label key to match. Ignored if `worker.affinity` is set
##
key: ""
## @param worker.nodeAffinityPreset.values Node label values to match. Ignored if `worker.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param worker.affinity Affinity for Worker pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `worker.podAffinityPreset`, `worker.podAntiAffinityPreset`, and `worker.nodeAffinityPreset` will be ignored when it's set
##
affinity: {}
## @param worker.nodeSelector Node labels for Worker pods assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param worker.tolerations Tolerations for Worker pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param worker.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
##
topologySpreadConstraints: []
## @param worker.podManagementPolicy Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
##
podManagementPolicy: Parallel
## @param worker.priorityClassName Worker pods' priorityClassName
##
priorityClassName: ""
## @param worker.schedulerName Kubernetes pod scheduler registry
## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param worker.updateStrategy.type Worker statefulset strategy type
## @param worker.updateStrategy.rollingUpdate Worker statefulset rolling update configuration parameters
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
rollingUpdate: {}
## @param worker.extraVolumes Optionally specify extra list of additional volumes for the Worker pod(s)
##
extraVolumes: []
## @param worker.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Worker container(s)
##
extraVolumeMounts: []
## @param worker.sidecars Add additional sidecar containers to the Worker pod(s)
## e.g:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param worker.enableDefaultInitContainers Deploy default init containers
##
enableDefaultInitContainers: true
## @param worker.initContainers Add additional init containers to the Worker pod(s)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
## e.g:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## command: ['sh', '-c', 'echo "hello world"']
##
initContainers: []
## @param worker.headlessServiceAnnotations Annotations for the headless service
##
headlessServiceAnnotations: {}
## @section Worker Traffic Exposure Parameters
##
externalAccess:
## @param worker.externalAccess.enabled Create a service per worker node
##
enabled: false
service:
## @param worker.externalAccess.service.type Worker service type
##
type: ClusterIP
## @param worker.externalAccess.service.loadBalancerIPs Array of load balancer IPs for each worker node. Length must be the same as `worker.replicaCount`
## e.g:
## loadBalancerIPs:
## - X.X.X.X
## - Y.Y.Y.Y
##
loadBalancerIPs: []
## @param worker.externalAccess.service.externalIPs Use distinct service host IPs to configure the external access of the worker nodes when service type is NodePort. Length must be the same as `worker.replicaCount`
## e.g:
## externalIPs:
## - X.X.X.X
## - Y.Y.Y.Y
##
externalIPs: []
## @param worker.externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each worker node. Length must be the same as `worker.replicaCount`
## e.g:
## loadBalancerAnnotations:
## - external-dns.alpha.kubernetes.io/hostname: worker1.external.example.com.
## - external-dns.alpha.kubernetes.io/hostname: worker2.external.example.com.
##
loadBalancerAnnotations: []
## @param worker.externalAccess.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready
## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/
##
publishNotReadyAddresses: false
## @param worker.externalAccess.service.ports.ssh Worker SSH service port
##
ports:
ssh: 22
## Node ports to expose
## NOTE: choose port between <30000-32767>
## @param worker.externalAccess.service.nodePorts Array of node ports used for each worker node. Length must be the same as `worker.replicaCount`
##
nodePorts: []
## @param worker.externalAccess.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## @param worker.externalAccess.service.sessionAffinity Control where client requests go, to the same pod or round-robin
## Values: ClientIP or None
## ref: https://kubernetes.io/docs/concepts/services-networking/service/
##
sessionAffinity: None
## @param worker.externalAccess.service.loadBalancerSourceRanges Worker service Load Balancer sources
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param worker.externalAccess.service.externalTrafficPolicy Worker service external traffic policy
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param worker.externalAccess.service.labels Additional custom labels for Worker service
##
labels: {}
## @param worker.externalAccess.service.annotations Additional custom annotations for Worker service
##
annotations: {}
## @param worker.externalAccess.service.extraPorts Extra ports to expose in the Worker service
##
extraPorts: []
serviceAccount:
## @param worker.serviceAccount.create Enable creation of ServiceAccount for Worker pods
##
create: true
## @param worker.serviceAccount.name The name of the ServiceAccount to use
## If not set and create is true, a name is generated using the common.names.fullname template
##
name: ""
## @param worker.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
## Can be set to false if pods using this serviceAccount do not need to use K8s API
##
automountServiceAccountToken: false
## @param worker.serviceAccount.annotations Additional custom annotations for the ServiceAccount
##
annotations: {}
## Network Policy configuration
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param worker.networkPolicy.enabled Enable creation of NetworkPolicy resources
##
enabled: true
## @param worker.networkPolicy.allowExternal The Policy model to apply
## When set to false, only pods with the correct client label will have network access to the ports Deepspeed is
## listening on. When true, Deepspeed will accept connections from any source (with the correct destination port).
##
allowExternal: true
## @param worker.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
##
allowExternalEgress: true
## @param worker.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraIngress: []
## @param worker.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param worker.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param worker.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## @section Deepspeed Worker persistence parameters
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
##
persistence:
## @param worker.persistence.enabled Use a PVC to persist data
##
enabled: false
## @param worker.persistence.storageClass Worker data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param worker.persistence.existingClaim Use an existing PVC which must be created manually before bound
##
existingClaim: ""
## @param worker.persistence.mountPath Path to mount the volume at
##
mountPath: /bitnami/deepspeed/data
## @param worker.persistence.accessModes Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## @param worker.persistence.selector Selector to match an existing Persistent Volume for the worker data PVC
## If set, the PVC can't have a PV dynamically provisioned for it
## E.g.
## selector:
## matchLabels:
## app: my-app
##
selector: {}
## @param worker.persistence.dataSource Custom PVC data source
##
dataSource: {}
## @param worker.persistence.size Size of data volume
##
size: 8Gi
## @param worker.persistence.labels Persistent Volume labels
##
labels: {}
## @param worker.persistence.annotations Persistent Volume annotations
##
annotations: {}
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param worker.pdb.create Enable/disable a Pod Disruption Budget creation
## @param worker.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param worker.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `worker.pdb.minAvailable` and `worker.pdb.maxUnavailable` are empty.
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## Bitnami git image version
## ref: https://hub.docker.com/r/bitnami/git/tags/
## @param gitImage.registry [default: REGISTRY_NAME] Git image registry
## @param gitImage.repository [default: REPOSITORY_NAME/git] Git image repository
## @skip gitImage.tag Git image tag (immutable tags are recommended)
## @param gitImage.digest Git image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param gitImage.pullPolicy Git image pull policy
## @param gitImage.pullSecrets Specify docker-registry secret names as an array
##
gitImage:
registry: docker.io
repository: bitnami/git
tag: 2.45.1-debian-12-r0
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory
##
enabled: false
## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository
## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r21
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Init container' resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}